Dataset schema (reconstructed from the flattened column dump):

column                    dtype    observed range
code                      string   86 to 54.5k characters
code_codestyle            int64    0 to 371
style_context             string   87 to 49.2k characters
style_context_codestyle   int64    0 to 349
label                     int64    0 to 1
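The rows below follow this schema one-to-one. As a minimal sketch of how such records could be consumed, assuming the rows are exported as JSON Lines (the dump does not name the dataset or its storage location, so "data.jsonl" is a hypothetical stand-in):

from datasets import load_dataset  # Hugging Face `datasets` library

# Hypothetical file name: the dump does not say where the rows live.
ds = load_dataset("json", data_files="data.jsonl", split="train")

for record in ds:
    # `code` and `style_context` hold Python source files as strings.
    assert isinstance(record["code"], str)
    assert isinstance(record["style_context"], str)
    # The *_codestyle columns are integer style-class ids.
    assert 0 <= record["code_codestyle"] <= 371
    assert 0 <= record["style_context_codestyle"] <= 349
    # Binary label (its semantics are not stated in the dump).
    assert record["label"] in (0, 1)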
"""simple docstring""" import math import sys def __a ( __lowerCamelCase ): UpperCAmelCase_ : Tuple = "" try: with open(__lowerCamelCase, "rb" ) as binary_file: UpperCAmelCase_ : Union[str, Any] = binary_file.read() for dat in data: UpperCAmelCase_ : List[str] = f"""{dat:08b}""" result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = {"0": "0", "1": "1"} UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = "", "" UpperCAmelCase_ : str = len(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue UpperCAmelCase_ : Optional[Any] = lexicon[curr_string] result += last_match_id UpperCAmelCase_ : Union[str, Any] = last_match_id + "0" if math.loga(__lowerCamelCase ).is_integer(): UpperCAmelCase_ : Optional[int] = {} for curr_key in list(__lowerCamelCase ): UpperCAmelCase_ : Dict = lexicon.pop(__lowerCamelCase ) UpperCAmelCase_ : int = new_lex UpperCAmelCase_ : List[str] = last_match_id + "1" index += 1 UpperCAmelCase_ : str = "" return result def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = 8 try: with open(__lowerCamelCase, "wb" ) as opened_file: UpperCAmelCase_ : Optional[Any] = [ to_write[i : i + byte_length] for i in range(0, len(__lowerCamelCase ), __lowerCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(__lowerCamelCase, 2 ).to_bytes(1, byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def __a ( __lowerCamelCase ): UpperCAmelCase_ : Tuple = 0 for letter in data_bits: if letter == "1": break counter += 1 UpperCAmelCase_ : int = data_bits[counter:] UpperCAmelCase_ : Optional[int] = data_bits[counter + 1 :] return data_bits def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[Any] = read_file_binary(__lowerCamelCase ) UpperCAmelCase_ : str = remove_prefix(__lowerCamelCase ) UpperCAmelCase_ : Any = decompress_data(__lowerCamelCase ) write_file_binary(__lowerCamelCase, __lowerCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
23
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ ) UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )] UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ ) UpperCAmelCase_ : Tuple = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Tuple = jax.device_count() UpperCAmelCase_ : Optional[int] = num_samples * [prompt] UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowercase_ ) == num_samples def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ ) UpperCAmelCase_ : Optional[int] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Union[str, Any] = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[str] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) 
UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ ) UpperCAmelCase_ : Any = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : str = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Dict = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) UpperCAmelCase_ : List[Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Optional[int] = 50 UpperCAmelCase_ : Optional[int] = jax.device_count() UpperCAmelCase_ : str = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , ) UpperCAmelCase_ : List[Any] = scheduler.create_state() UpperCAmelCase_ : int = scheduler_state UpperCAmelCase_ : Union[str, Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm 
lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : int = 50 UpperCAmelCase_ : str = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , ) UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1] # With memory efficient attention UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , ) UpperCAmelCase_ : str = replicate(lowercase_ ) UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
23
1
"""simple docstring""" import os from datetime import datetime as dt from github import Github _a = [ 'good first issue', 'feature request', 'wip', ] def __a ( ): UpperCAmelCase_ : Optional[Any] = Github(os.environ["GITHUB_TOKEN"] ) UpperCAmelCase_ : List[Any] = g.get_repo("huggingface/accelerate" ) UpperCAmelCase_ : Union[str, Any] = repo.get_issues(state="open" ) for issue in open_issues: UpperCAmelCase_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()], key=lambda __lowerCamelCase : i.created_at, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = comments[0] if len(__lowerCamelCase ) > 0 else None UpperCAmelCase_ : Optional[Any] = dt.utcnow() UpperCAmelCase_ : Optional[int] = (current_time - issue.updated_at).days UpperCAmelCase_ : str = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state="closed" ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) if __name__ == "__main__": main()
23
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
23
1
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets _a = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' _a = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' _a = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = 0.0 for i, j in zip(lowercase_ , lowercase_ ): n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_ ) else 0.0 UpperCAmelCase_ : Optional[Any] = n_correct / len(lowercase_ ) return { "accuracy": accuracy, }
23
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in 
zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
23
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _a = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __a ( __lowerCamelCase ): if isinstance(__lowerCamelCase, torch.Tensor ): return image elif isinstance(__lowerCamelCase, PIL.Image.Image ): UpperCAmelCase_ : Union[str, Any] = [image] UpperCAmelCase_ : List[Any] = [trans(img.convert("RGB" ) ) for img in image] UpperCAmelCase_ : Union[str, Any] = torch.stack(__lowerCamelCase ) return image class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM UpperCAmelCase_ : str = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" # get the original timestep using init_timestep UpperCAmelCase_ : Union[str, Any] = min(int(num_inference_steps * strength ) , lowercase_ ) UpperCAmelCase_ : List[str] = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase_ : int = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ): """simple docstring""" if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" ) UpperCAmelCase_ : Optional[int] = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase_ : str = init_latents.shape UpperCAmelCase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print("add noise to latents at timestep" , lowercase_ ) UpperCAmelCase_ : Optional[Any] = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = init_latents return latents @torch.no_grad() def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 50 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" self.check_inputs(lowercase_ ) # 2. Preprocess image UpperCAmelCase_ : List[str] = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.get_timesteps(lowercase_ , lowercase_ , self.device ) UpperCAmelCase_ : Any = timesteps[:1].repeat(lowercase_ ) # 4. 
Prepare latent variables UpperCAmelCase_ : Dict = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) UpperCAmelCase_ : List[str] = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output UpperCAmelCase_ : List[str] = self.unet(lowercase_ , lowercase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample UpperCAmelCase_ : str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
23
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _a = object() # For specifying empty leaf dict `{}` _a = object() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ): UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )] if matches and all(__lowerCamelCase ): return True return False def __a ( __lowerCamelCase ): def replace(__lowerCamelCase, __lowerCamelCase ): for rule, replacement in rules: if _match(__lowerCamelCase, __lowerCamelCase ): return replacement return val return replace def __a ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )), (("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )), (("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = _get_partition_rules() UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase ) UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )} UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCamelCase ) )
23
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """donut-swin""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.02 , lowercase_=1E-5 , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : List[Any] = image_size UpperCAmelCase_ : Optional[Any] = patch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : Optional[int] = depths UpperCAmelCase_ : List[str] = len(lowercase_ ) UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : Dict = window_size UpperCAmelCase_ : Tuple = mlp_ratio UpperCAmelCase_ : List[str] = qkv_bias UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Tuple = use_absolute_embeddings UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_ : List[Any] = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
23
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _a = logging.get_logger(__name__) # pylint: disable=invalid-name def __a ( __lowerCamelCase ): warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead", __lowerCamelCase, ) if isinstance(__lowerCamelCase, torch.Tensor ): return image elif isinstance(__lowerCamelCase, PIL.Image.Image ): UpperCAmelCase_ : int = [image] if isinstance(image[0], PIL.Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image[0].size UpperCAmelCase_ , UpperCAmelCase_ : str = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 UpperCAmelCase_ : Tuple = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] UpperCAmelCase_ : Dict = np.concatenate(__lowerCamelCase, axis=0 ) UpperCAmelCase_ : Union[str, Any] = np.array(__lowerCamelCase ).astype(np.floataa ) / 255.0 UpperCAmelCase_ : Optional[int] = image.transpose(0, 3, 1, 2 ) UpperCAmelCase_ : List[str] = 2.0 * image - 1.0 UpperCAmelCase_ : int = torch.from_numpy(__lowerCamelCase ) elif isinstance(image[0], torch.Tensor ): UpperCAmelCase_ : List[Any] = torch.cat(__lowerCamelCase, dim=0 ) return image def __a ( __lowerCamelCase ): if isinstance(__lowerCamelCase, torch.Tensor ): return mask elif isinstance(__lowerCamelCase, PIL.Image.Image ): UpperCAmelCase_ : Tuple = [mask] if isinstance(mask[0], PIL.Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = mask[0].size UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCAmelCase_ : List[Any] = [np.array(m.convert("L" ).resize((w, h), resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] UpperCAmelCase_ : Any = np.concatenate(__lowerCamelCase, axis=0 ) UpperCAmelCase_ : Union[str, Any] = mask.astype(np.floataa ) / 255.0 UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Any = torch.from_numpy(__lowerCamelCase ) elif isinstance(mask[0], torch.Tensor ): UpperCAmelCase_ : List[str] = torch.cat(__lowerCamelCase, dim=0 ) return mask class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : UNetaDModel SCREAMING_SNAKE_CASE__ : RePaintScheduler def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) @torch.no_grad() def __call__( self , lowercase_ , lowercase_ , lowercase_ = 250 , lowercase_ = 0.0 , lowercase_ = 10 , lowercase_ = 10 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = image UpperCAmelCase_ : Optional[Any] = _preprocess_image(lowercase_ ) UpperCAmelCase_ : Optional[int] = original_image.to(device=self.device , dtype=self.unet.dtype ) UpperCAmelCase_ : Optional[Any] = _preprocess_mask(lowercase_ ) UpperCAmelCase_ : str = mask_image.to(device=self.device , dtype=self.unet.dtype ) UpperCAmelCase_ : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length 
{len(lowercase_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase_ : Dict = original_image.shape UpperCAmelCase_ : List[str] = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowercase_ , lowercase_ , lowercase_ , self.device ) UpperCAmelCase_ : Optional[Any] = eta UpperCAmelCase_ : List[str] = self.scheduler.timesteps[0] + 1 UpperCAmelCase_ : Dict = generator[0] if isinstance(lowercase_ , lowercase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual UpperCAmelCase_ : int = self.unet(lowercase_ , lowercase_ ).sample # compute previous image: x_t -> x_t-1 UpperCAmelCase_ : Dict = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t UpperCAmelCase_ : Optional[Any] = self.scheduler.undo_step(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase_ : List[Any] = t UpperCAmelCase_ : str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
23
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """lxmert""" SCREAMING_SNAKE_CASE__ : Tuple = {} def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=12 , lowercase_=9500 , lowercase_=1600 , lowercase_=400 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=9 , lowercase_=5 , lowercase_=5 , lowercase_=2048 , lowercase_=4 , lowercase_=6.67 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Optional[int] = hidden_act UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : Tuple = num_qa_labels UpperCAmelCase_ : Optional[Any] = num_object_labels UpperCAmelCase_ : List[Any] = num_attr_labels UpperCAmelCase_ : Optional[Any] = l_layers UpperCAmelCase_ : Tuple = x_layers UpperCAmelCase_ : Tuple = r_layers UpperCAmelCase_ : Union[str, Any] = visual_feat_dim UpperCAmelCase_ : Optional[Any] = visual_pos_dim UpperCAmelCase_ : Union[str, Any] = visual_loss_normalizer UpperCAmelCase_ : Optional[Any] = task_matched UpperCAmelCase_ : Tuple = task_mask_lm UpperCAmelCase_ : int = task_obj_predict UpperCAmelCase_ : str = task_qa UpperCAmelCase_ : str = visual_obj_loss UpperCAmelCase_ : Union[str, Any] = visual_attr_loss UpperCAmelCase_ : Any = visual_feat_loss UpperCAmelCase_ : Dict = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**lowercase_ )
23
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
1
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _a = logging.get_logger(__name__) class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = question_encoder UpperCAmelCase_ : int = generator UpperCAmelCase_ : List[Any] = self.question_encoder def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if os.path.isfile(lowercase_ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) UpperCAmelCase_ : int = os.path.join(lowercase_ , "question_encoder_tokenizer" ) UpperCAmelCase_ : Any = os.path.join(lowercase_ , "generator_tokenizer" ) self.question_encoder.save_pretrained(lowercase_ ) self.generator.save_pretrained(lowercase_ ) @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer UpperCAmelCase_ : Optional[int] = kwargs.pop("config" , lowercase_ ) if config is None: UpperCAmelCase_ : Dict = RagConfig.from_pretrained(lowercase_ ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained( lowercase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( lowercase_ , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=lowercase_ , generator=lowercase_ ) def __call__( self , *lowercase_ , **lowercase_ ): """simple docstring""" return self.current_tokenizer(*lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ): """simple docstring""" return self.generator.batch_decode(*lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ): """simple docstring""" return self.generator.decode(*lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.question_encoder def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.generator def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = "longest" , lowercase_ = None , lowercase_ = True , **lowercase_ , ): """simple docstring""" warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details" , lowercase_ , ) if max_length is None: UpperCAmelCase_ : int = self.current_tokenizer.model_max_length UpperCAmelCase_ : Union[str, Any] = self( lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCAmelCase_ : List[str] = self.current_tokenizer.model_max_length UpperCAmelCase_ : Union[str, Any] = self( text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , ) UpperCAmelCase_ : List[Any] = labels["input_ids"] return model_inputs
23
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
23
1
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _a = logging.get_logger(__name__) # General docstring _a = 'RegNetConfig' # Base docstring _a = 'facebook/regnet-y-040' _a = [1, 1_088, 7, 7] # Image classification docstring _a = 'facebook/regnet-y-040' _a = 'tabby, tabby cat' _a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ = 3 , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = "relu" , ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[Any] = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) UpperCAmelCase_ : List[Any] = nn.BatchNormad(lowercase_ ) UpperCAmelCase_ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = self.convolution(lowercase_ ) UpperCAmelCase_ : Dict = self.normalization(lowercase_ ) UpperCAmelCase_ : Tuple = self.activation(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : str = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) UpperCAmelCase_ : Dict = config.num_channels def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) UpperCAmelCase_ : Optional[Any] = self.embedder(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ = 2 ): """simple docstring""" super().__init__() UpperCAmelCase_ : Tuple = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) UpperCAmelCase_ : Optional[int] = nn.BatchNormad(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.convolution(lowercase_ ) UpperCAmelCase_ : Optional[int] = self.normalization(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) UpperCAmelCase_ : List[Any] = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" # b c h w -> b c 1 1 UpperCAmelCase_ : List[str] = self.pooler(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.attention(lowercase_ ) UpperCAmelCase_ : Dict = hidden_state * attention return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[Any] = in_channels != out_channels or stride != 1 UpperCAmelCase_ : List[str] = max(1 , out_channels // config.groups_width ) UpperCAmelCase_ : Tuple = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Optional[int] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) UpperCAmelCase_ : Optional[Any] = ACTaFN[config.hidden_act] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = hidden_state UpperCAmelCase_ : int = self.layer(lowercase_ ) UpperCAmelCase_ : List[Any] = self.shortcut(lowercase_ ) hidden_state += residual UpperCAmelCase_ : Tuple = self.activation(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 ): """simple docstring""" super().__init__() UpperCAmelCase_ : int = in_channels != out_channels or stride != 1 UpperCAmelCase_ : Any = max(1 , out_channels // config.groups_width ) UpperCAmelCase_ : Dict = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Union[str, Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) UpperCAmelCase_ : int = ACTaFN[config.hidden_act] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = hidden_state UpperCAmelCase_ : str = self.layer(lowercase_ ) UpperCAmelCase_ : List[str] = 
self.shortcut(lowercase_ ) hidden_state += residual UpperCAmelCase_ : Optional[int] = self.activation(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 2 , lowercase_ = 2 , ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer UpperCAmelCase_ : Tuple = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = self.layers(lowercase_ ) return hidden_state class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : Dict = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCAmelCase_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False , lowercase_ = True ): """simple docstring""" UpperCAmelCase_ : int = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase_ : Optional[Any] = hidden_states + (hidden_state,) UpperCAmelCase_ : Union[str, Any] = stage_module(lowercase_ ) if output_hidden_states: UpperCAmelCase_ : str = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = RegNetConfig SCREAMING_SNAKE_CASE__ : List[Any] = """regnet""" SCREAMING_SNAKE_CASE__ : Any = """pixel_values""" SCREAMING_SNAKE_CASE__ : Optional[Any] = True def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=False ): """simple docstring""" if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = value _a = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _a = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" ,lowercase__ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" super().__init__(lowercase_ ) UpperCAmelCase_ : int = config UpperCAmelCase_ : List[str] = RegNetEmbeddings(lowercase_ ) UpperCAmelCase_ : Tuple = RegNetEncoder(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : int = self.embedder(lowercase_ ) UpperCAmelCase_ : Dict = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) UpperCAmelCase_ : Optional[int] = encoder_outputs[0] UpperCAmelCase_ : Optional[int] = self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" ,lowercase__ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" super().__init__(lowercase_ ) UpperCAmelCase_ : Optional[int] = config.num_labels UpperCAmelCase_ : Optional[int] = RegNetModel(lowercase_ ) # classification head UpperCAmelCase_ : Union[str, Any] = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase__ ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : List[str] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) UpperCAmelCase_ : List[str] = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase_ : Any = self.classifier(lowercase_ ) UpperCAmelCase_ : Optional[Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase_ : str = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase_ : int = "single_label_classification" else: UpperCAmelCase_ : int = "multi_label_classification" if self.config.problem_type == "regression": UpperCAmelCase_ : Tuple = MSELoss() if self.num_labels == 1: UpperCAmelCase_ : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase_ : Any = loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase_ : str = CrossEntropyLoss() UpperCAmelCase_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase_ : List[str] = BCEWithLogitsLoss() UpperCAmelCase_ : List[Any] = loss_fct(lowercase_ , lowercase_ ) if not return_dict: UpperCAmelCase_ : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
23
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
1
"""simple docstring""" import os import numpy import onnx def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[Any] = a.name UpperCAmelCase_ : Any = b.name UpperCAmelCase_ : List[str] = "" UpperCAmelCase_ : Dict = "" UpperCAmelCase_ : List[Any] = a == b UpperCAmelCase_ : List[Any] = name_a UpperCAmelCase_ : Any = name_b return res def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowerCamelCase, __lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g, __lowerCamelCase, __lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g, __lowerCamelCase, __lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g, __lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): for n in graph_proto.node: _node_replace_input_with(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = list(model.graph.initializer ) UpperCAmelCase_ : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i UpperCAmelCase_ : List[str] = inits[i].name UpperCAmelCase_ : Union[str, Any] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph, __lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : str = os.path.dirname(__lowerCamelCase ) UpperCAmelCase_ : Dict = os.path.basename(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = onnx.load(os.path.join(__lowerCamelCase, __lowerCamelCase ) ) UpperCAmelCase_ : Dict = list(model.graph.initializer ) UpperCAmelCase_ : Any = set() UpperCAmelCase_ : List[Any] = {} UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : str = 0 for i in range(len(__lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1, len(__lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i], inits[j] ): dup_set.add(__lowerCamelCase ) dup_set.add(__lowerCamelCase ) UpperCAmelCase_ : Optional[int] = inits[j].data_type UpperCAmelCase_ : int = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: ", __lowerCamelCase ) total_reduced_size += mem_size UpperCAmelCase_ : Optional[Any] = inits[i].name UpperCAmelCase_ : Dict = inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowerCamelCase ) else: UpperCAmelCase_ : Optional[Any] = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB" ) UpperCAmelCase_ : str = sorted(__lowerCamelCase ) _remove_dup_initializers_from_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = "optimized_" + model_file_name UpperCAmelCase_ : int = os.path.join(__lowerCamelCase, __lowerCamelCase ) onnx.save(__lowerCamelCase, __lowerCamelCase ) return new_model
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
"""simple docstring""" from itertools import product def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : str = sides_number UpperCAmelCase_ : int = max_face_number * dice_number UpperCAmelCase_ : Union[str, Any] = [0] * (max_total + 1) UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Union[str, Any] = range(__lowerCamelCase, max_face_number + 1 ) for dice_numbers in product(__lowerCamelCase, repeat=__lowerCamelCase ): UpperCAmelCase_ : int = sum(__lowerCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def __a ( ): UpperCAmelCase_ : str = total_frequency_distribution( sides_number=4, dice_number=9 ) UpperCAmelCase_ : Dict = total_frequency_distribution( sides_number=6, dice_number=6 ) UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Union[str, Any] = 9 UpperCAmelCase_ : Optional[int] = 4 * 9 UpperCAmelCase_ : int = 6 for peter_total in range(__lowerCamelCase, max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) UpperCAmelCase_ : Optional[int] = (4**9) * (6**6) UpperCAmelCase_ : Dict = peter_wins_count / total_games_number UpperCAmelCase_ : Tuple = round(__lowerCamelCase, ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
23
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : str = latents.to(lowercase_ ) UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : str = self._execution_device UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 ) UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", 
"learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5 UpperCAmelCase_ : int = image.clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig _a = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = """tapas""" def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1024 , lowercase_=[3, 256, 256, 2, 256, 256, 10] , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=0 , lowercase_=10.0 , lowercase_=0 , lowercase_=1.0 , lowercase_=None , lowercase_=1.0 , lowercase_=False , lowercase_=None , lowercase_=1.0 , lowercase_=1.0 , lowercase_=False , lowercase_=False , lowercase_="ratio" , lowercase_=None , lowercase_=None , lowercase_=64 , lowercase_=32 , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ): """simple docstring""" super().__init__(pad_token_id=lowercase_ , **lowercase_ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = max_position_embeddings UpperCAmelCase_ : str = type_vocab_sizes UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : List[Any] = layer_norm_eps # Fine-tuning task hyperparameters UpperCAmelCase_ : Any = positive_label_weight UpperCAmelCase_ : List[Any] = num_aggregation_labels UpperCAmelCase_ : Any = aggregation_loss_weight UpperCAmelCase_ : Optional[Any] = use_answer_as_supervision UpperCAmelCase_ : Optional[int] = answer_loss_importance UpperCAmelCase_ : str = use_normalized_answer_loss UpperCAmelCase_ : Dict = huber_loss_delta UpperCAmelCase_ : List[str] = temperature UpperCAmelCase_ : Union[str, Any] = aggregation_temperature UpperCAmelCase_ : List[Any] = use_gumbel_for_cells UpperCAmelCase_ : Tuple = use_gumbel_for_aggregation UpperCAmelCase_ : str = average_approximation_function UpperCAmelCase_ : Dict = cell_selection_preference UpperCAmelCase_ : Optional[Any] = answer_loss_cutoff UpperCAmelCase_ : Optional[int] = max_num_rows UpperCAmelCase_ : Optional[Any] = max_num_columns UpperCAmelCase_ : Union[str, Any] = average_logits_per_cell UpperCAmelCase_ : int = select_one_column UpperCAmelCase_ : List[str] = allow_empty_column_selection UpperCAmelCase_ : Tuple = init_cell_selection_weights_to_zero UpperCAmelCase_ : int = reset_position_index_per_cell UpperCAmelCase_ : Optional[int] = disable_per_token_loss # Aggregation hyperparameters UpperCAmelCase_ : int = aggregation_labels UpperCAmelCase_ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , lowercase_ ): UpperCAmelCase_ : str = {int(lowercase_ ): v 
for k, v in aggregation_labels.items()}
23
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
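# A hedged sketch of round-tripping the configuration above through
# to_dict/from_dict, assuming the public transformers API; the values are
# small toys, not a recommended setup.
from transformers import DetrConfig

config = DetrConfig(num_queries=50, d_model=128)
clone = DetrConfig.from_dict(config.to_dict())
assert clone.num_queries == 50 and clone.d_model == 128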
23
1
"""simple docstring""" _a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _a = [None] * 10_000_000 _a = True _a = False def __a ( __lowerCamelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) ) UpperCAmelCase_ : List[str] = number_chain while number < 1000_0000: UpperCAmelCase_ : List[Any] = number_chain number *= 10 return number_chain def __a ( __lowerCamelCase = 1000_0000 ): for i in range(1, __lowerCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
23
"""simple docstring""" _a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _a = [None] * 10_000_000 _a = True _a = False def __a ( __lowerCamelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) ) UpperCAmelCase_ : List[str] = number_chain while number < 1000_0000: UpperCAmelCase_ : List[Any] = number_chain number *= 10 return number_chain def __a ( __lowerCamelCase = 1000_0000 ): for i in range(1, __lowerCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
23
1
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): for param, grad_param in zip(model_a.parameters(), model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ): model.train() UpperCAmelCase_ : str = model(__lowerCamelCase ) UpperCAmelCase_ : List[str] = F.mse_loss(__lowerCamelCase, target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase=False ): set_seed(42 ) UpperCAmelCase_ : Optional[Any] = RegressionModel() UpperCAmelCase_ : Tuple = deepcopy(__lowerCamelCase ) UpperCAmelCase_ : Tuple = RegressionDataset(length=80 ) UpperCAmelCase_ : Any = DataLoader(__lowerCamelCase, batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ : Union[str, Any] = AdamW(params=model.parameters(), lr=1E-3 ) UpperCAmelCase_ : str = AdamW(params=ddp_model.parameters(), lr=1E-3 ) UpperCAmelCase_ : Any = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 ) UpperCAmelCase_ : Optional[Any] = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = accelerator.prepare(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(__lowerCamelCase, __lowerCamelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( __lowerCamelCase ): # Test when on a single CPU or GPU that the context manager does nothing UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = get_training_setup(__lowerCamelCase ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = next(iter(__lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCamelCase ): step_model(__lowerCamelCase, 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: # Sync grads step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad, ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : int = ddp_input[torch.randperm(len(__lowerCamelCase ) )] def __a ( __lowerCamelCase ): # Test on distributed setup that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = get_training_setup(__lowerCamelCase ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = next(iter(__lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : int = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCamelCase ): step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: # Sync grads step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : Tuple = ddp_input[torch.randperm(len(__lowerCamelCase ) )] def __a ( __lowerCamelCase=False, __lowerCamelCase=False ): UpperCAmelCase_ : int = Accelerator( split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = get_training_setup(__lowerCamelCase ) for iteration, batch in enumerate(__lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__lowerCamelCase ): 
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : Optional[int] = ddp_input[torch.randperm(len(__lowerCamelCase ) )] GradientState._reset_state() def __a ( __lowerCamelCase=False, __lowerCamelCase=False ): UpperCAmelCase_ : Optional[Any] = Accelerator( split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_training_setup(__lowerCamelCase, __lowerCamelCase ) for iteration, batch in enumerate(__lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : Any = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__lowerCamelCase ): step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ : Dict = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase )) if accelerator.num_processes > 1: check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __a ( ): UpperCAmelCase_ : int = Accelerator() UpperCAmelCase_ : Optional[Any] = RegressionDataset(length=80 ) UpperCAmelCase_ : Tuple = DataLoader(__lowerCamelCase, batch_size=16 ) UpperCAmelCase_ : int = RegressionDataset(length=96 ) UpperCAmelCase_ : int = DataLoader(__lowerCamelCase, batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowerCamelCase, __lowerCamelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__lowerCamelCase ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase ) if iteration < len(__lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase ) if batch_num < len(__lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ): UpperCAmelCase_ : Union[str, Any] = Accelerator() UpperCAmelCase_ : Any = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(__lowerCamelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(__lowerCamelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation(__lowerCamelCase, __lowerCamelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
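# The pattern under test above, in its minimal form (assumes accelerate and
# torch are installed): inside accelerator.accumulate(model), gradients are
# summed locally and only synchronized/stepped every
# gradient_accumulation_steps batches.
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

for step in range(4):
    batch = torch.randn(8, 4)
    with accelerator.accumulate(model):
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)       # loss is scaled by the accumulation steps
        optimizer.step()                 # accelerate skips this on non-sync steps
        optimizer.zero_grad()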
23
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Return True if there is node that has not iterated. UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [] queue.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = True while queue: UpperCAmelCase_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) UpperCAmelCase_ : Any = True UpperCAmelCase_ : Union[str, Any] = u return visited[t] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # This array is filled by BFS and to store path UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase )) UpperCAmelCase_ : Any = 0 while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = float("Inf" ) UpperCAmelCase_ : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] ) UpperCAmelCase_ : Dict = parent[s] max_flow += path_flow UpperCAmelCase_ : Optional[Any] = sink while v != source: UpperCAmelCase_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase_ : Optional[int] = parent[v] return max_flow _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _a , _a = 0, 5 print(ford_fulkerson(graph, source, sink))
23
1
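The flattened snippet above implements Ford-Fulkerson with BFS path search (Edmonds-Karp), but the obfuscated `UpperCAmelCase_` assignment targets hide the bookkeeping. A minimal de-obfuscated sketch of the same algorithm, with hypothetical names (`max_flow`, `capacity`) that are not part of the row above:

```python
from collections import deque

def max_flow(capacity, source, sink):
    """Edmonds-Karp: repeatedly augment along a shortest path found by BFS."""
    n = len(capacity)
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path is left
            return total
        # bottleneck capacity along the path found by BFS
        path_flow, v = float("inf"), sink
        while v != source:
            path_flow = min(path_flow, capacity[parent[v]][v])
            v = parent[v]
        # push the flow and update residual capacities
        v = sink
        while v != source:
            u = parent[v]
            capacity[u][v] -= path_flow
            capacity[v][u] += path_flow
            v = u
        total += path_flow

graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(max_flow(graph, 0, 5))  # 23 for this well-known example network
```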
"""simple docstring""" from math import factorial _a = {str(digit): factorial(digit) for digit in range(10)} def __a ( __lowerCamelCase ): if not isinstance(__lowerCamelCase, __lowerCamelCase ): raise TypeError("Parameter number must be int" ) if number < 0: raise ValueError("Parameter number must be greater than or equal to 0" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCamelCase ) ) def __a ( __lowerCamelCase = 60, __lowerCamelCase = 100_0000 ): if not isinstance(__lowerCamelCase, __lowerCamelCase ) or not isinstance(__lowerCamelCase, __lowerCamelCase ): raise TypeError("Parameters chain_length and number_limit must be int" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( "Parameters chain_length and number_limit must be greater than 0" ) # the counter for the chains with the exact desired length UpperCAmelCase_ : str = 0 # the cached sizes of the previous chains UpperCAmelCase_ : dict[int, int] = {} for start_chain_element in range(1, __lowerCamelCase ): # The temporary set will contain the elements of the chain UpperCAmelCase_ : Optional[Any] = set() UpperCAmelCase_ : Optional[int] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. UpperCAmelCase_ : Tuple = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCamelCase ) chain_set_length += 1 UpperCAmelCase_ : str = digit_factorial_sum(__lowerCamelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] UpperCAmelCase_ : str = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution()}""")
23
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
1
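The Project Euler snippet above counts digit-factorial chains of a given length. A short sketch of the core chain computation, with a hypothetical `chain_length` helper for a single starting value (no caching):

```python
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}

def digit_factorial_sum(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))

def chain_length(start: int) -> int:
    """Number of distinct terms before the chain first repeats."""
    seen = set()
    n = start
    while n not in seen:
        seen.add(n)
        n = digit_factorial_sum(n)
    return len(seen)

# 69 -> 363600 -> 1454 -> 169 -> 363601 -> (1454 repeats): 5 distinct terms
print(chain_length(69))  # 5
```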
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
23
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , 
lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
23
1
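The A* implementation above re-sorts a plain list on every iteration; a heap keeps the open set ordered in O(log n) per push/pop. A minimal sketch with hypothetical names (`astar`, `came_from`), using the same grid convention ((y, x) cells, 0 = free, 1 = obstacle):

```python
import heapq

def astar(grid, start, goal):
    """Minimal A* with a Manhattan heuristic; returns the path as (y, x) cells."""
    def h(p):  # Manhattan distance to the goal
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

    g_cost = {start: 0}
    came_from = {start: None}
    open_heap = [(h(start), start)]
    while open_heap:
        _, pos = heapq.heappop(open_heap)
        if pos == goal:  # reconstruct by walking parents backwards
            path = []
            while pos is not None:
                path.append(pos)
                pos = came_from[pos]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nxt = (pos[0] + dy, pos[1] + dx)
            if (
                0 <= nxt[0] < len(grid)
                and 0 <= nxt[1] < len(grid[0])
                and grid[nxt[0]][nxt[1]] == 0
                and g_cost[pos] + 1 < g_cost.get(nxt, float("inf"))
            ):
                g_cost[nxt] = g_cost[pos] + 1
                came_from[nxt] = pos
                heapq.heappush(open_heap, (g_cost[nxt] + h(nxt), nxt))
    return [start]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
print(len(astar(grid, (0, 0), (6, 6))))  # 13 cells, i.e. 12 unit moves
```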
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = StableUnCLIPImgaImgPipeline SCREAMING_SNAKE_CASE__ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS SCREAMING_SNAKE_CASE__ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Tuple = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE__ : Optional[Any] = frozenset([] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = 32 UpperCAmelCase_ : Dict = embedder_hidden_size # image encoding components UpperCAmelCase_ : List[str] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) UpperCAmelCase_ : str = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowercase_ , projection_dim=lowercase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ ) UpperCAmelCase_ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase_ : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0 ) UpperCAmelCase_ : int = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = AutoencoderKL() UpperCAmelCase_ : List[Any] = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": 
image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 , lowercase_=True ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) if pil_image: UpperCAmelCase_ : Any = input_image * 0.5 + 0.5 UpperCAmelCase_ : str = input_image.clamp(0 , 1 ) UpperCAmelCase_ : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCAmelCase_ : Optional[Any] = DiffusionPipeline.numpy_to_pil(lowercase_ )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Optional[Any] = self.get_dummy_components() UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline(**lowercase_ ) UpperCAmelCase_ : Tuple = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Dict = self.get_dummy_inputs(lowercase_ ) inputs.update({"image_embeds": None} ) UpperCAmelCase_ : List[str] = sd_pipe(**lowercase_ ).images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Optional[int] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowercase_ ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase_ ) @slow @require_torch_gpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) UpperCAmelCase_ : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) UpperCAmelCase_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # stable unclip will oom when 
integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase_ : Any = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe(lowercase_ , "anime turle" , generator=lowercase_ , output_type="np" ) UpperCAmelCase_ : int = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) UpperCAmelCase_ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase_ : Any = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : Tuple = pipe(lowercase_ , "anime turle" , generator=lowercase_ , output_type="np" ) UpperCAmelCase_ : Tuple = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) UpperCAmelCase_ : str = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase_ : List[str] = pipe( lowercase_ , "anime turtle" , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
23
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 ) UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ ) try: pickle.loads(pickle.dumps(lowercase_ ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
23
1
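The accelerate test above checks that a prepared optimizer still pickles. A minimal stand-in for that pattern without the `Accelerator` wrapper (assumes only `torch` is installed):

```python
import pickle

import torch

# Build a small model/optimizer pair and confirm it survives a pickle round trip.
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(optimizer))
assert isinstance(restored, torch.optim.SGD)
print(restored.defaults["lr"])  # 0.1
```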
"""simple docstring""" import argparse import os import re import packaging.version _a = 'examples/' _a = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } _a = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } _a = 'README.md' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : List[Any] = f.read() UpperCAmelCase_ , UpperCAmelCase_ : Dict = REPLACE_PATTERNS[pattern] UpperCAmelCase_ : Optional[int] = replace.replace("VERSION", __lowerCamelCase ) UpperCAmelCase_ : Any = re_pattern.sub(__lowerCamelCase, __lowerCamelCase ) with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f: f.write(__lowerCamelCase ) def __a ( __lowerCamelCase ): for folder, directories, fnames in os.walk(__lowerCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, pattern="examples" ) def __a ( __lowerCamelCase, __lowerCamelCase=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) if not patch: update_version_in_examples(__lowerCamelCase ) def __a ( ): UpperCAmelCase_ : Any = "🤗 Transformers currently provides the following architectures" UpperCAmelCase_ : Union[str, Any] = "1. Want to contribute a new model?" with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : Union[str, Any] = f.readlines() # Find the start of the list. UpperCAmelCase_ : List[Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 UpperCAmelCase_ : Optional[int] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): UpperCAmelCase_ : List[Any] = lines[index].replace( "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", ) index += 1 with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f: f.writelines(__lowerCamelCase ) def __a ( ): with open(REPLACE_FILES["init"], "r" ) as f: UpperCAmelCase_ : Tuple = f.read() UpperCAmelCase_ : Any = REPLACE_PATTERNS["init"][0].search(__lowerCamelCase ).groups()[0] return packaging.version.parse(__lowerCamelCase ) def __a ( __lowerCamelCase=False ): UpperCAmelCase_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: UpperCAmelCase_ : List[str] = default_version.base_version elif patch: UpperCAmelCase_ : Tuple = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: UpperCAmelCase_ : str = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. 
UpperCAmelCase_ : Dict = input(f"""Which version are you releasing? [{default_version}]""" ) if len(__lowerCamelCase ) == 0: UpperCAmelCase_ : Dict = default_version print(f"""Updating version to {version}.""" ) global_version_update(__lowerCamelCase, patch=__lowerCamelCase ) def __a ( ): UpperCAmelCase_ : str = get_version() UpperCAmelCase_ : Union[str, Any] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" UpperCAmelCase_ : str = current_version.base_version # Check with the user we got that right. UpperCAmelCase_ : Any = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(__lowerCamelCase ) == 0: UpperCAmelCase_ : Optional[Any] = dev_version print(f"""Updating version to {version}.""" ) global_version_update(__lowerCamelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') _a = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
23
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """ctrl""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : List[str] = n_embd UpperCAmelCase_ : Dict = n_layer UpperCAmelCase_ : Optional[int] = n_head UpperCAmelCase_ : List[str] = dff UpperCAmelCase_ : Tuple = resid_pdrop UpperCAmelCase_ : Optional[Any] = embd_pdrop UpperCAmelCase_ : str = layer_norm_epsilon UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : List[str] = use_cache super().__init__(**lowercase_ )
23
1
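The release script above works by substituting a `VERSION` placeholder through pre-compiled regexes. A miniature of the `init` pattern, run on an in-memory string instead of a file (the version numbers here are made up):

```python
import re

INIT_RE = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

source = '__version__ = "0.19.0.dev0"\n'
print(INIT_RE.search(source).groups()[0])             # 0.19.0.dev0
print(INIT_RE.sub('__version__ = "0.19.0"', source))  # __version__ = "0.19.0"
```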
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _a = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } _a = { 'junnyu/roformer_chinese_small': 1_536, 'junnyu/roformer_chinese_base': 1_536, 'junnyu/roformer_chinese_char_small': 512, 'junnyu/roformer_chinese_char_base': 512, 'junnyu/roformer_small_discriminator': 128, 'junnyu/roformer_small_generator': 128, } _a = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : List[str] = RoFormerTokenizer def __init__( self , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , lowercase_=True , lowercase_=None , **lowercase_ , ): """simple docstring""" super().__init__( lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , ) UpperCAmelCase_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("lowercase" , lowercase_ ) != do_lower_case or pre_tok_state.get("strip_accents" , lowercase_ ) != strip_accents ): UpperCAmelCase_ : List[str] = getattr(lowercase_ , pre_tok_state.pop("type" ) ) UpperCAmelCase_ : Union[str, Any] = do_lower_case UpperCAmelCase_ : Union[str, Any] = strip_accents UpperCAmelCase_ : int = pre_tok_class(**lowercase_ ) UpperCAmelCase_ : List[Any] = do_lower_case def __getstate__( self ): """simple docstring""" UpperCAmelCase_ : Any = self.__dict__.copy() UpperCAmelCase_ : Tuple = BertPreTokenizer() return state def __setstate__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = d UpperCAmelCase_ : Optional[Any] = 
self.__dict__["_tokenizer"].get_vocab() UpperCAmelCase_ : Tuple = PreTokenizer.custom(JiebaPreTokenizer(lowercase_ ) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : int = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=False , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Dict = BertPreTokenizer() return super().save_pretrained(lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
23
"""simple docstring""" def __a ( __lowerCamelCase ): assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0""" raise ValueError(__lowerCamelCase ) else: UpperCAmelCase_ : List[str] = sylvester(number - 1 ) UpperCAmelCase_ : List[str] = num - 1 UpperCAmelCase_ : List[str] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
1
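The recursive function above computes Sylvester's sequence via `lower * upper + 1`, which is algebraically a(n) = a(n-1)^2 - a(n-1) + 1 with a(1) = 2. An iterative sketch (hypothetical name `sylvester_iter`) that makes the recurrence explicit:

```python
def sylvester_iter(n: int) -> int:
    """a(1) = 2; each later term is one more than the product of all earlier terms."""
    a = 2
    for _ in range(n - 1):
        a = a * a - a + 1
    return a

print([sylvester_iter(i) for i in range(1, 6)])  # [2, 3, 7, 43, 1807]
```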
"""simple docstring""" import enum import shutil import sys _a , _a = shutil.get_terminal_size() _a = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'} class A_ (enum.Enum ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 def __a ( __lowerCamelCase, __lowerCamelCase="" ): sys.stdout.write(str(__lowerCamelCase ) + end ) sys.stdout.flush() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase="" ): forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", __lowerCamelCase ) def __a ( ): forceWrite("\r" ) def __a ( __lowerCamelCase, __lowerCamelCase ): forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def __a ( ): forceWrite(" " * TERMINAL_WIDTH ) reset_cursor() def __a ( ): reset_cursor() forceWrite("-" * TERMINAL_WIDTH )
23
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCamelCase__ ( self ): """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
23
1
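The helpers two rows above drive the terminal through raw ANSI escape sequences (`ESC[<n>m` for colors, `ESC[<n><A-D>` for cursor moves). A self-contained sketch of the coloring idea, with a hypothetical `colored` helper:

```python
import sys

def colored(content: str, color: int) -> str:
    """Wrap text in an ANSI SGR color code and reset afterwards."""
    return f"\u001b[{color}m{content}\u001b[0m"

sys.stdout.write(colored("ok", 32) + "\n")  # 32 = green foreground
sys.stdout.flush()
```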
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor _a = random.Random() def __a ( __lowerCamelCase, __lowerCamelCase=1.0, __lowerCamelCase=None, __lowerCamelCase=None ): if rng is None: UpperCAmelCase_ : Union[str, Any] = global_rng UpperCAmelCase_ : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class A_ (unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_=7 , lowercase_=400 , lowercase_=2000 , lowercase_=24 , lowercase_=24 , lowercase_=0.0 , lowercase_=1_6000 , lowercase_=True , lowercase_=True , ): """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : List[str] = min_seq_length UpperCAmelCase_ : Dict = max_seq_length UpperCAmelCase_ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase_ : Tuple = feature_size UpperCAmelCase_ : List[str] = num_mel_bins UpperCAmelCase_ : Optional[int] = padding_value UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Optional[Any] = return_attention_mask UpperCAmelCase_ : List[Any] = do_normalize def UpperCamelCase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , lowercase_=False , lowercase_=False ): """simple docstring""" def _flatten(lowercase_ ): return list(itertools.chain(*lowercase_ ) ) if equal_length: UpperCAmelCase_ : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCAmelCase_ : Optional[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase_ : Dict = [np.asarray(lowercase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SpeechaTextFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = SpeechaTextFeatureExtractionTester(self ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" # Tests that all call wrap to encode_plus and batch_encode_plus UpperCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : List[str] = [np.asarray(lowercase_ ) for speech_input in speech_inputs] # Test feature size UpperCAmelCase_ : int = feature_extractor(lowercase_ , padding=lowercase_ , 
return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input UpperCAmelCase_ : int = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features UpperCAmelCase_ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test batched UpperCAmelCase_ : Dict = feature_extractor(lowercase_ , return_tensors="np" ).input_features UpperCAmelCase_ : Tuple = feature_extractor(lowercase_ , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. UpperCAmelCase_ : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCAmelCase_ : Tuple = np.asarray(lowercase_ ) UpperCAmelCase_ : int = feature_extractor(lowercase_ , return_tensors="np" ).input_features UpperCAmelCase_ : str = feature_extractor(lowercase_ , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : int = ["longest", "max_length", "do_not_pad"] UpperCAmelCase_ : Optional[Any] = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): UpperCAmelCase_ : Dict = feature_extractor( lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_ ) UpperCAmelCase_ : Any = inputs.input_features UpperCAmelCase_ : List[Any] = inputs.attention_mask UpperCAmelCase_ : Dict = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : Union[str, Any] = ["longest", "max_length", "do_not_pad"] UpperCAmelCase_ : Optional[Any] = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = feature_extractor( lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ ) UpperCAmelCase_ : Optional[int] = inputs.input_features UpperCAmelCase_ : Union[str, Any] = inputs.attention_mask UpperCAmelCase_ : Union[str, Any] = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : List[Any] = feature_extractor( lowercase_ , padding="max_length" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , ) UpperCAmelCase_ : Dict = inputs.input_features UpperCAmelCase_ : Optional[int] = inputs.attention_mask UpperCAmelCase_ : int = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : Dict = feature_extractor( lowercase_ , padding="longest" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , ) UpperCAmelCase_ : Tuple = inputs.input_features UpperCAmelCase_ : int = inputs.attention_mask UpperCAmelCase_ : List[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) UpperCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCAmelCase_ : Dict = feature_extractor( lowercase_ , padding="longest" , max_length=16 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , ) UpperCAmelCase_ : Dict = inputs.input_features UpperCAmelCase_ : int = inputs.attention_mask UpperCAmelCase_ : Any = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def UpperCamelCase__ ( self ): """simple docstring""" import torch UpperCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa ) UpperCAmelCase_ : Tuple = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCAmelCase_ : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) UpperCAmelCase_ : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" from datasets import load_dataset UpperCAmelCase_ : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCAmelCase_ : int = ds.sort("id" ).select(range(lowercase_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ): """simple 
docstring""" # fmt: off UpperCAmelCase_ : List[Any] = np.array([ -1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41, -1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28, -1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25, ] ) # fmt: on UpperCAmelCase_ : List[Any] = self._load_datasamples(1 ) UpperCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase_ : Optional[int] = feature_extractor(lowercase_ , return_tensors="pt" ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1E-4 ) )
23
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small" UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase_ : List[str] = "en_speaker_1" UpperCAmelCase_ : Tuple = "This is a test string" UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json" UpperCAmelCase_ : Any = "speaker_embeddings" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCAmelCase_ : int = 35 UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : List[Any] = 8 UpperCAmelCase_ : Optional[Any] = { "semantic_prompt": np.ones(lowercase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" ) np.savez(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : 
Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) UpperCAmelCase_ : Tuple = processor(text=self.input_string ) UpperCAmelCase_ : Union[str, Any] = tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
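# Usage sketch (added; not part of the original test module). Mirrors how the
# processor under test is driven in practice; assumes Hub access to the
# "ylacombe/bark-small" checkpoint and the "en_speaker_1" preset used above.
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    encoded = processor(text="This is a test string", voice_preset="en_speaker_1")
    print(encoded["input_ids"].shape)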
23
1
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() UpperCAmelCase_ : Any = value_function UpperCAmelCase_ : Union[str, Any] = unet UpperCAmelCase_ : Tuple = scheduler UpperCAmelCase_ : Dict = env UpperCAmelCase_ : Tuple = env.get_dataset() UpperCAmelCase_ : Optional[Any] = {} for key in self.data.keys(): try: UpperCAmelCase_ : Dict = self.data[key].mean() except: # noqa: E722 pass UpperCAmelCase_ : Tuple = {} for key in self.data.keys(): try: UpperCAmelCase_ : Union[str, Any] = self.data[key].std() except: # noqa: E722 pass UpperCAmelCase_ : List[str] = env.observation_space.shape[0] UpperCAmelCase_ : List[Any] = env.action_space.shape[0] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return x_in * self.stds[key] + self.means[key] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if type(lowercase_ ) is dict: return {k: self.to_torch(lowercase_ ) for k, v in x_in.items()} elif torch.is_tensor(lowercase_ ): return x_in.to(self.unet.device ) return torch.tensor(lowercase_ , device=self.unet.device ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" for key, val in cond.items(): UpperCAmelCase_ : Any = val.clone() return x_in def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = x.shape[0] UpperCAmelCase_ : Any = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCAmelCase_ : Union[str, Any] = torch.full((batch_size,) , lowercase_ , device=self.unet.device , dtype=torch.long ) for _ in range(lowercase_ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCAmelCase_ : Tuple = self.value_function(x.permute(0 , 2 , 1 ) , lowercase_ ).sample UpperCAmelCase_ : Union[str, Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCAmelCase_ : Optional[Any] = self.scheduler._get_variance(lowercase_ ) UpperCAmelCase_ : str = torch.exp(0.5 * posterior_variance ) UpperCAmelCase_ : Optional[Any] = model_std * grad UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : Optional[Any] = x.detach() UpperCAmelCase_ : Any = x + scale * grad UpperCAmelCase_ : List[str] = self.reset_xa(lowercase_ , lowercase_ , self.action_dim ) UpperCAmelCase_ : Dict = self.unet(x.permute(0 , 2 , 1 ) , lowercase_ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCAmelCase_ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , predict_epsilon=lowercase_ )["prev_sample"] # apply conditions to the trajectory (set the initial state) UpperCAmelCase_ : int = self.reset_xa(lowercase_ , lowercase_ , self.action_dim ) UpperCAmelCase_ : int = self.to_torch(lowercase_ ) return x, y def __call__( self , lowercase_ , lowercase_=64 , lowercase_=32 , lowercase_=2 , lowercase_=0.1 ): """simple docstring""" # normalize the observations and create batch dimension UpperCAmelCase_ : Union[str, Any] = self.normalize(lowercase_ , "observations" ) 
UpperCAmelCase_ : Optional[Any] = obs[None].repeat(lowercase_ , axis=0 ) UpperCAmelCase_ : Union[str, Any] = {0: self.to_torch(lowercase_ )} UpperCAmelCase_ : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCAmelCase_ : Tuple = randn_tensor(lowercase_ , device=self.unet.device ) UpperCAmelCase_ : Tuple = self.reset_xa(lowercase_ , lowercase_ , self.action_dim ) UpperCAmelCase_ : List[str] = self.to_torch(lowercase_ ) # run the diffusion process UpperCAmelCase_ , UpperCAmelCase_ : str = self.run_diffusion(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # sort output trajectories by value UpperCAmelCase_ : str = y.argsort(0 , descending=lowercase_ ).squeeze() UpperCAmelCase_ : Any = x[sorted_idx] UpperCAmelCase_ : Optional[Any] = sorted_values[:, :, : self.action_dim] UpperCAmelCase_ : Union[str, Any] = actions.detach().cpu().numpy() UpperCAmelCase_ : Tuple = self.de_normalize(lowercase_ , key="actions" ) # select the action with the highest value if y is not None: UpperCAmelCase_ : Union[str, Any] = 0 else: # if we didn't run value guiding, select a random action UpperCAmelCase_ : Union[str, Any] = np.random.randint(0 , lowercase_ ) UpperCAmelCase_ : int = denorm_actions[selected_index, 0] return denorm_actions
23
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Tuple = val def __a ( ): UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = DeiTConfig() # all deit models have fine-tuned heads UpperCAmelCase_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Tuple = 1000 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : Any = int(deit_name[-6:-4] ) UpperCAmelCase_ : Dict = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCAmelCase_ : Any = 192 UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : Union[str, Any] = 12 UpperCAmelCase_ : int = 3 elif deit_name[9:].startswith("small" ): UpperCAmelCase_ : List[str] = 384 UpperCAmelCase_ : List[str] = 1536 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : Any = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCAmelCase_ : int = 1024 UpperCAmelCase_ : List[Any] = 4096 UpperCAmelCase_ : Optional[int] = 24 UpperCAmelCase_ : int = 16 # load original model from timm UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Optional[Any] = timm_model.state_dict() UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCAmelCase_ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size ) UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : int = encoding["pixel_values"] UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
23
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase_ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : str = "" else: UpperCAmelCase_ : str = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Tuple = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Dict = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : Any = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase ): UpperCAmelCase_ : str = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Optional[int] = val def __a ( ): UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Optional[int] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ): UpperCAmelCase_ : Union[str, Any] = ViTConfig() # patch_size if model_name[-1] == "8": UpperCAmelCase_ : List[Any] = 8 # set labels if required if not base_model: UpperCAmelCase_ : Any = 1000 UpperCAmelCase_ : int = "huggingface/label-files" UpperCAmelCase_ : Dict = "imagenet-1k-id2label.json" UpperCAmelCase_ : Union[str, Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : Dict = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[str] = idalabel UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: UpperCAmelCase_ : List[Any] = 384 UpperCAmelCase_ : int = 1536 UpperCAmelCase_ : int = 12 UpperCAmelCase_ : Tuple = 6 # load original model from torch hub UpperCAmelCase_ : Union[str, Any] = torch.hub.load("facebookresearch/dino:main", __lowerCamelCase ) original_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Tuple = original_model.state_dict() if base_model: remove_classification_head_(__lowerCamelCase ) UpperCAmelCase_ : str = create_rename_keys(__lowerCamelCase, base_model=__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model if base_model: UpperCAmelCase_ : Union[str, Any] = ViTModel(__lowerCamelCase, add_pooling_layer=__lowerCamelCase ).eval() else: UpperCAmelCase_ : int = ViTForImageClassification(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor UpperCAmelCase_ : Union[str, Any] = ViTImageProcessor() UpperCAmelCase_ : int = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : Any = encoding["pixel_values"] UpperCAmelCase_ : int = model(__lowerCamelCase ) if base_model: UpperCAmelCase_ : Dict = original_model(__lowerCamelCase ) assert torch.allclose(__lowerCamelCase, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: UpperCAmelCase_ 
: List[str] = original_model(__lowerCamelCase ) assert logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) _a = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
23
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ ) UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )] UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ ) UpperCAmelCase_ : Tuple = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Tuple = jax.device_count() UpperCAmelCase_ : Optional[int] = num_samples * [prompt] UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowercase_ ) == num_samples def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ ) UpperCAmelCase_ : Optional[int] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Union[str, Any] = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[str] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) 
UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ ) UpperCAmelCase_ : Any = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : str = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Dict = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) UpperCAmelCase_ : List[Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Optional[int] = 50 UpperCAmelCase_ : Optional[int] = jax.device_count() UpperCAmelCase_ : str = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , ) UpperCAmelCase_ : List[Any] = scheduler.create_state() UpperCAmelCase_ : int = scheduler_state UpperCAmelCase_ : Union[str, Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm 
lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : int = 50 UpperCAmelCase_ : str = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , ) UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1] # With memory efficient attention UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , ) UpperCAmelCase_ : str = replicate(lowercase_ ) UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
23
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = '▁' _a = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } _a = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } _a = { 'facebook/s2t-small-librispeech-asr': 1_024, } _a = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] _a = {'mustc': MUSTC_LANGS} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : List[Any] = MAX_MODEL_INPUT_SIZES SCREAMING_SNAKE_CASE__ : Dict = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE__ : List[int] = [] def __init__( self , lowercase_ , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<pad>" , lowercase_="<unk>" , lowercase_=False , lowercase_=False , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , do_upper_case=lowercase_ , do_lower_case=lowercase_ , tgt_lang=lowercase_ , lang_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) UpperCAmelCase_ : Optional[int] = do_upper_case UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : Optional[int] = load_json(lowercase_ ) UpperCAmelCase_ : Any = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Dict = spm_file UpperCAmelCase_ : Dict = load_spm(lowercase_ , self.sp_model_kwargs ) if lang_codes is not None: UpperCAmelCase_ : str = lang_codes UpperCAmelCase_ : str = LANGUAGES[lang_codes] UpperCAmelCase_ : List[Any] = [F"""<lang:{lang}>""" for lang in self.langs] UpperCAmelCase_ : int = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs} UpperCAmelCase_ : Dict = self.lang_tokens UpperCAmelCase_ : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: UpperCAmelCase_ : Optional[int] = {} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.encoder ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = new_tgt_lang self.set_tgt_lang_special_tokens(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.lang_code_to_id[tgt_lang] UpperCAmelCase_ : List[Any] = [lang_code_id] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.encoder.get(lowercase_ , self.encoder[self.unk_token] ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ , self.unk_token ) def UpperCamelCase__ ( self , lowercase_ ): 
"""simple docstring""" UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: UpperCAmelCase_ : Any = self.sp_model.decode(lowercase_ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " UpperCAmelCase_ : str = [] else: current_sub_tokens.append(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.sp_model.decode(lowercase_ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) UpperCAmelCase_ : Optional[int] = [1] * len(self.prefix_tokens ) UpperCAmelCase_ : Any = [1] if token_ids_a is None: return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" UpperCAmelCase_ : str = self.__dict__.copy() UpperCAmelCase_ : Dict = None return state def __setstate__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : int = Path(lowercase_ ) assert save_dir.is_dir(), F"""{save_directory} should be a directory""" UpperCAmelCase_ : Optional[int] = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) UpperCAmelCase_ : List[Any] = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , lowercase_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowercase_ ) elif not os.path.isfile(self.spm_file ): with open(lowercase_ , "wb" ) as fi: UpperCAmelCase_ : Any = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (str(lowercase_ ), str(lowercase_ )) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = sentencepiece.SentencePieceProcessor(**__lowerCamelCase ) spm.Load(str(__lowerCamelCase ) ) return spm def __a ( __lowerCamelCase ): with open(__lowerCamelCase, "r" ) as f: return json.load(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): with open(__lowerCamelCase, "w" ) as f: json.dump(__lowerCamelCase, __lowerCamelCase, indent=2 )
23
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
23
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
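# Usage sketch (added; not part of the original module). The class above
# corresponds to transformers' DetrConfig; assuming that upstream class is
# installed, a config can be built and round-tripped like this.
if __name__ == "__main__":
    from transformers import DetrConfig

    cfg = DetrConfig(num_queries=50)
    assert cfg.to_dict()["num_queries"] == 50
    print(cfg.model_type)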
23
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in 
zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
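# Usage sketch (added; not part of the original test module). A minimal
# denoising loop with PNDMScheduler, skipping the Runge-Kutta warm-up steps
# so step() dispatches straight to the PLMS update; the random tensors stand
# in for a real UNet's noise predictions.
if __name__ == "__main__":
    sched = PNDMScheduler(num_train_timesteps=1000, skip_prk_steps=True)
    sched.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # placeholder for a model prediction
        sample = sched.step(model_output, t, sample).prev_sample
    print(sample.shape)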
23
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _a = logging.get_logger(__name__) def __a ( __lowerCamelCase ): if isinstance(__lowerCamelCase, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__lowerCamelCase, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__lowerCamelCase ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = ["""pixel_values"""] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : str = size if size is not None else {"shortest_edge": 256} UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" ) UpperCAmelCase_ : Any = do_resize UpperCAmelCase_ : Union[str, Any] = size UpperCAmelCase_ : Any = do_center_crop UpperCAmelCase_ : Optional[Any] = crop_size UpperCAmelCase_ : Any = resample UpperCAmelCase_ : int = do_rescale UpperCAmelCase_ : List[Any] = rescale_factor UpperCAmelCase_ : Optional[int] = offset UpperCAmelCase_ : Optional[Any] = do_normalize UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" in size: UpperCAmelCase_ : Tuple = get_resize_output_image_size(lowercase_ , size["shortest_edge"] , default_to_square=lowercase_ ) elif "height" in size and "width" in size: UpperCAmelCase_ : Any = (size["height"], size["width"]) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = True , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = image.astype(np.floataa ) if offset: UpperCAmelCase_ : Any = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ): """simple docstring""" return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. UpperCAmelCase_ : str = to_numpy_array(lowercase_ ) if do_resize: UpperCAmelCase_ : Any = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) if do_center_crop: UpperCAmelCase_ : Any = self.center_crop(lowercase_ , size=lowercase_ ) if do_rescale: UpperCAmelCase_ : str = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_ ) if do_normalize: UpperCAmelCase_ : List[str] = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) UpperCAmelCase_ : Any = to_channel_dimension_format(lowercase_ , lowercase_ ) return image def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : Tuple = resample if resample is not None else self.resample UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Any = offset if offset is not None else self.offset UpperCAmelCase_ : int = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Dict = size if size is not None else self.size UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) 
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : str = get_size_dict(lowercase_ , param_name="crop_size" ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) UpperCAmelCase_ : Optional[Any] = make_batched(lowercase_ ) UpperCAmelCase_ : List[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] UpperCAmelCase_ : Tuple = {"pixel_values": videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
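# Illustrative sketch of the batching helper `__a` (make_batched) defined at the
# top of this module: it normalizes a single image, a list of frames, or a list
# of videos into list-of-videos form. This assumes execution inside the module
# above (so `__a` and numpy are in scope); the array shape is an assumption.
import numpy as np

_frame = np.zeros((32, 32, 3), dtype=np.uint8 )
assert __a(_frame )[0][0] is _frame            # single image   -> [[image]]
assert __a([_frame, _frame] )[0][1] is _frame  # list of frames -> [frames]
assert __a([[_frame]] )[0][0] is _frame        # list of videos -> unchanged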
23
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _a = object() # For specifying empty leaf dict `{}` _a = object() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ): UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )] if matches and all(__lowerCamelCase ): return True return False def __a ( __lowerCamelCase ): def replace(__lowerCamelCase, __lowerCamelCase ): for rule, replacement in rules: if _match(__lowerCamelCase, __lowerCamelCase ): return replacement return val return replace def __a ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )), (("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )), (("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = _get_partition_rules() UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase ) UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )} UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCamelCase ) )
23
1
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def __a ( __lowerCamelCase ): def wrapper(*__lowerCamelCase, **__lowerCamelCase ): UpperCAmelCase_ : Dict = timeit.default_timer() UpperCAmelCase_ : Any = func(*__lowerCamelCase, **__lowerCamelCase ) UpperCAmelCase_ : Dict = timeit.default_timer() - starttime return delta UpperCAmelCase_ : Dict = func.__name__ return wrapper def __a ( __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ): UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = seq_shapes or {} for i in range(__lowerCamelCase ): UpperCAmelCase_ : List[str] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__lowerCamelCase, _ArrayXD ): UpperCAmelCase_ : int = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__lowerCamelCase, datasets.Value ): if v.dtype == "string": UpperCAmelCase_ : Union[str, Any] = "The small grey turtle was surprisingly fast when challenged." else: UpperCAmelCase_ : Dict = np.random.randint(10, size=1 ).astype(v.dtype ).item() elif isinstance(__lowerCamelCase, datasets.Sequence ): while isinstance(__lowerCamelCase, datasets.Sequence ): UpperCAmelCase_ : Dict = v.feature UpperCAmelCase_ : str = seq_shapes[k] UpperCAmelCase_ : List[Any] = np.random.rand(*__lowerCamelCase ).astype(v.dtype ) UpperCAmelCase_ : Optional[Any] = data dummy_data.append((i, example) ) return dummy_data def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ): UpperCAmelCase_ : List[Any] = generate_examples(__lowerCamelCase, num_examples=__lowerCamelCase, seq_shapes=__lowerCamelCase ) with ArrowWriter(features=__lowerCamelCase, path=__lowerCamelCase ) as writer: for key, record in dummy_data: UpperCAmelCase_ : Optional[int] = features.encode_example(__lowerCamelCase ) writer.write(__lowerCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) UpperCAmelCase_ : Union[str, Any] = datasets.Dataset.from_file(filename=__lowerCamelCase, info=datasets.DatasetInfo(features=__lowerCamelCase ) ) return dataset
23
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
1
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCamelCase__ ( self ): """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
23
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
23
1
"""simple docstring""" from __future__ import annotations from cmath import sqrt def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) UpperCAmelCase_ : Tuple = b * b - 4 * a * c UpperCAmelCase_ : List[Any] = (-b + sqrt(__lowerCamelCase )) / (2 * a) UpperCAmelCase_ : Tuple = (-b - sqrt(__lowerCamelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def __a ( ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = quadratic_roots(a=5, b=6, c=1 ) print(f"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
23
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
1
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
23
1
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class A_ (datasets.BeamBasedBuilder ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowercase_ , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(lowercase_ ) class A_ (datasets.BeamBasedBuilder ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowercase_ , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(lowercase_ ) def __a ( ): return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def __a ( ): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class A_ (lowercase__ ): '''simple docstring''' @require_beam def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase_ : str = DummyBeamDataset(cache_dir=lowercase_ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(lowercase_ , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase_ : Union[str, Any] = builder.as_dataset() self.assertEqual(dset["train"].num_rows , lowercase_ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , lowercase_ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(lowercase_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def UpperCamelCase__ ( self ): """simple docstring""" import apache_beam as beam UpperCAmelCase_ : Any = beam.io.parquetio.WriteToParquet UpperCAmelCase_ : Dict = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase_ : List[str] = DummyBeamDataset(cache_dir=lowercase_ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase_ : Optional[int] = partial(lowercase_ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( lowercase_ , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( lowercase_ , builder.name , "default" , 
"0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase_ : Optional[Any] = builder.as_dataset() self.assertEqual(dset["train"].num_rows , lowercase_ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , lowercase_ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(lowercase_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase_ : str = DummyBeamDataset(cache_dir=lowercase_ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase_ : Tuple = NestedBeamDataset(cache_dir=lowercase_ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(lowercase_ , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase_ : List[str] = builder.as_dataset() self.assertEqual(dset["train"].num_rows , lowercase_ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , lowercase_ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(lowercase_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
23
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
1
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __a ( __lowerCamelCase ): UpperCAmelCase_ : Dict = SwinConfig(image_size=192 ) if "base" in model_name: UpperCAmelCase_ : str = 6 UpperCAmelCase_ : List[Any] = 128 UpperCAmelCase_ : int = (2, 2, 18, 2) UpperCAmelCase_ : Optional[Any] = (4, 8, 16, 32) elif "large" in model_name: UpperCAmelCase_ : Any = 12 UpperCAmelCase_ : Tuple = 192 UpperCAmelCase_ : Dict = (2, 2, 18, 2) UpperCAmelCase_ : int = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) UpperCAmelCase_ : Tuple = window_size UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : str = depths UpperCAmelCase_ : Dict = num_heads return config def __a ( __lowerCamelCase ): if "encoder.mask_token" in name: UpperCAmelCase_ : Optional[int] = name.replace("encoder.mask_token", "embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("encoder.patch_embed.norm", "embeddings.norm" ) if "attn.proj" in name: UpperCAmelCase_ : Any = name.replace("attn.proj", "attention.output.dense" ) if "attn" in name: UpperCAmelCase_ : int = name.replace("attn", "attention.self" ) if "norm1" in name: UpperCAmelCase_ : List[Any] = name.replace("norm1", "layernorm_before" ) if "norm2" in name: UpperCAmelCase_ : str = name.replace("norm2", "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : Optional[int] = name.replace("mlp.fc2", "output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase_ : List[Any] = "layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase_ : Optional[int] = "layernorm.bias" if "decoder" in name: pass else: UpperCAmelCase_ : str = "swin." + name return name def __a ( __lowerCamelCase, __lowerCamelCase ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : List[str] = orig_state_dict.pop(__lowerCamelCase ) if "attn_mask" in key: pass elif "qkv" in key: UpperCAmelCase_ : Optional[Any] = key.split("." 
) UpperCAmelCase_ : List[str] = int(key_split[2] ) UpperCAmelCase_ : Dict = int(key_split[4] ) UpperCAmelCase_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : List[str] = val[:dim, :] UpperCAmelCase_ : List[Any] = val[ dim : dim * 2, : ] UpperCAmelCase_ : Optional[int] = val[-dim:, :] else: UpperCAmelCase_ : Tuple = val[ :dim ] UpperCAmelCase_ : List[str] = val[ dim : dim * 2 ] UpperCAmelCase_ : Any = val[ -dim: ] else: UpperCAmelCase_ : List[str] = val return orig_state_dict def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = torch.load(__lowerCamelCase, map_location="cpu" )["model"] UpperCAmelCase_ : List[Any] = get_swin_config(__lowerCamelCase ) UpperCAmelCase_ : Dict = SwinForMaskedImageModeling(__lowerCamelCase ) model.eval() UpperCAmelCase_ : int = convert_state_dict(__lowerCamelCase, __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = ViTImageProcessor(size={"height": 192, "width": 192} ) UpperCAmelCase_ : Dict = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) UpperCAmelCase_ : Any = image_processor(images=__lowerCamelCase, return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model(**__lowerCamelCase ).logits print(outputs.keys() ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(f"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(f"""microsoft/{model_name}""" ) image_processor.push_to_hub(f"""microsoft/{model_name}""" ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _a = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
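# Standalone illustrative sketch of the fused-qkv split performed in the
# conversion above: a combined (3*dim, dim) weight is cut into equal
# query/key/value blocks. The tiny tensor here is an assumption for the demo.
import torch

_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32 ).reshape(3 * _dim, _dim )
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v] ), _qkv )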
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : str = latents.to(lowercase_ ) UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : str = self._execution_device UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 ) UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", 
"learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5 UpperCAmelCase_ : int = image.clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
1
"""simple docstring""" from __future__ import annotations import typing from collections import Counter def __a ( __lowerCamelCase ): UpperCAmelCase_ : typing.Counter[int] = Counter() for base in range(1, max_perimeter + 1 ): for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ): UpperCAmelCase_ : Optional[Any] = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(__lowerCamelCase ): UpperCAmelCase_ : Any = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __a ( __lowerCamelCase = 1000 ): UpperCAmelCase_ : int = pythagorean_triple(__lowerCamelCase ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(f"""Perimeter {solution()} has maximum solutions""")
23
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
23
1
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def __a ( __lowerCamelCase, __lowerCamelCase=1000 ): if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd UpperCAmelCase_ : int = n - 1 UpperCAmelCase_ : Union[str, Any] = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) UpperCAmelCase_ : List[str] = 0 while count < prec: UpperCAmelCase_ : Dict = random.randint(2, n - 1 ) UpperCAmelCase_ : int = bin_exp_mod(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) if b != 1: UpperCAmelCase_ : Optional[int] = True for _ in range(__lowerCamelCase ): if b == n - 1: UpperCAmelCase_ : Any = False break UpperCAmelCase_ : List[str] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": _a = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
23
"""simple docstring""" _a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _a = [None] * 10_000_000 _a = True _a = False def __a ( __lowerCamelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) ) UpperCAmelCase_ : List[str] = number_chain while number < 1000_0000: UpperCAmelCase_ : List[Any] = number_chain number *= 10 return number_chain def __a ( __lowerCamelCase = 1000_0000 ): for i in range(1, __lowerCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
23
1
"""simple docstring""" class A_ : '''simple docstring''' def __init__( self ): """simple docstring""" UpperCAmelCase_ : dict[str, TrieNode] = {} # Mapping from char to TrieNode UpperCAmelCase_ : str = False def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" for word in words: self.insert(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = self for char in word: if char not in curr.nodes: UpperCAmelCase_ : Any = TrieNode() UpperCAmelCase_ : str = curr.nodes[char] UpperCAmelCase_ : str = True def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self for char in word: if char not in curr.nodes: return False UpperCAmelCase_ : Optional[Any] = curr.nodes[char] return curr.is_leaf def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" def _delete(lowercase_ , lowercase_ , lowercase_ ) -> bool: if index == len(lowercase_ ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase_ : Tuple = False return len(curr.nodes ) == 0 UpperCAmelCase_ : str = word[index] UpperCAmelCase_ : Optional[Any] = curr.nodes.get(lowercase_ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase_ : Dict = _delete(lowercase_ , lowercase_ , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase_ , 0 ) def __a ( __lowerCamelCase, __lowerCamelCase ): if node.is_leaf: print(__lowerCamelCase, end=" " ) for key, value in node.nodes.items(): print_words(__lowerCamelCase, word + key ) def __a ( ): UpperCAmelCase_ : Tuple = "banana bananas bandana band apple all beast".split() UpperCAmelCase_ : List[Any] = TrieNode() root.insert_many(__lowerCamelCase ) # print_words(root, "") assert all(root.find(__lowerCamelCase ) for word in words ) assert root.find("banana" ) assert not root.find("bandanas" ) assert not root.find("apps" ) assert root.find("apple" ) assert root.find("all" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def __a ( __lowerCamelCase, __lowerCamelCase ): print(str(__lowerCamelCase ), "works!" if passes else "doesn't work :(" ) def __a ( ): assert test_trie() def __a ( ): print_results("Testing trie functionality", test_trie() ) if __name__ == "__main__": main()
23
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Return True if there is node that has not iterated. UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [] queue.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = True while queue: UpperCAmelCase_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) UpperCAmelCase_ : Any = True UpperCAmelCase_ : Union[str, Any] = u return visited[t] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # This array is filled by BFS and to store path UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase )) UpperCAmelCase_ : Any = 0 while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = float("Inf" ) UpperCAmelCase_ : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] ) UpperCAmelCase_ : Dict = parent[s] max_flow += path_flow UpperCAmelCase_ : Optional[Any] = sink while v != source: UpperCAmelCase_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase_ : Optional[int] = parent[v] return max_flow _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _a , _a = 0, 5 print(ford_fulkerson(graph, source, sink))
23
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { 'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'], 'tokenization_luke': ['LukeTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST', 'LukeForEntityClassification', 'LukeForEntityPairClassification', 'LukeForEntitySpanClassification', 'LukeForMultipleChoice', 'LukeForQuestionAnswering', 'LukeForSequenceClassification', 'LukeForTokenClassification', 'LukeForMaskedLM', 'LukeModel', 'LukePreTrainedModel', ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
1
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
23
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , 
lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
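A self-contained sketch of the core padding step implemented above: right-pad a batch of 1-D feature sequences to the longest length and build the matching attention mask, mirroring the `np.pad` calls in the `_pad` logic.

import numpy as np


def pad_batch(sequences, padding_value=0.0):
    max_length = max(len(seq) for seq in sequences)
    padded, masks = [], []
    for seq in sequences:
        difference = max_length - len(seq)
        padded.append(np.pad(np.asarray(seq, dtype=np.float32), (0, difference),
                             "constant", constant_values=padding_value))
        masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))
    return np.stack(padded), np.stack(masks)


values, attention_mask = pad_batch([[0.1, 0.2, 0.3], [0.4]])
print(values.shape, attention_mask.tolist())  # (2, 3) [[1, 1, 1], [1, 0, 0]]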
23
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _a = logging.get_logger(__name__) if is_vision_available(): import PIL class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = ["""pixel_values"""] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = True , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : List[str] = size if size is not None else {"shortest_edge": 224} UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" ) UpperCAmelCase_ : Any = do_resize UpperCAmelCase_ : List[Any] = size UpperCAmelCase_ : Union[str, Any] = resample UpperCAmelCase_ : int = do_center_crop UpperCAmelCase_ : str = crop_size UpperCAmelCase_ : Optional[Any] = do_rescale UpperCAmelCase_ : Tuple = rescale_factor UpperCAmelCase_ : Union[str, Any] = do_normalize UpperCAmelCase_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase_ : List[Any] = do_convert_rgb def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) UpperCAmelCase_ : str = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ): """simple docstring""" return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ): """simple docstring""" return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : List[str] = size if size is not None else self.size UpperCAmelCase_ : str = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ ) UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Optional[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase_ : Tuple = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase_ : List[str] = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. 
UpperCAmelCase_ : Dict = [to_numpy_array(lowercase_ ) for image in images] if do_resize: UpperCAmelCase_ : List[str] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: UpperCAmelCase_ : List[str] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: UpperCAmelCase_ : Optional[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: UpperCAmelCase_ : List[Any] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] UpperCAmelCase_ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] UpperCAmelCase_ : Tuple = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
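A self-contained sketch of the CLIP-style pipeline above (convert to RGB, resize the shortest edge, center-crop, rescale to [0, 1], normalize, HWC to CHW), written directly against PIL and numpy. The mean and std values are the published OpenAI CLIP constants.

import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]


def preprocess(image: Image.Image, shortest_edge=224, crop=224) -> np.ndarray:
    image = image.convert("RGB")
    # resize so the shortest edge equals `shortest_edge`, keeping aspect ratio
    w, h = image.size
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    # center crop to (crop, crop)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    # rescale to [0, 1] and normalize per channel
    arr = np.asarray(image).astype(np.float32) / 255.0
    arr = (arr - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD
    return arr.transpose(2, 0, 1)  # HWC -> CHW


pixels = preprocess(Image.new("RGB", (320, 240)))
print(pixels.shape)  # (3, 224, 224)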
23
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 ) UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ ) try: pickle.loads(pickle.dumps(lowercase_ ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
23
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """trocr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : str = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self , lowercase_=5_0265 , lowercase_=1024 , lowercase_=12 , lowercase_=16 , lowercase_=4096 , lowercase_="gelu" , lowercase_=512 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=2 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : Optional[int] = decoder_layers UpperCAmelCase_ : Any = decoder_attention_heads UpperCAmelCase_ : Optional[int] = decoder_ffn_dim UpperCAmelCase_ : Any = activation_function UpperCAmelCase_ : int = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = dropout UpperCAmelCase_ : List[Any] = attention_dropout UpperCAmelCase_ : List[Any] = activation_dropout UpperCAmelCase_ : List[str] = init_std UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : Union[str, Any] = scale_embedding UpperCAmelCase_ : Dict = use_learned_position_embeddings UpperCAmelCase_ : List[str] = layernorm_embedding super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
23
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """ctrl""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : List[str] = n_embd UpperCAmelCase_ : Dict = n_layer UpperCAmelCase_ : Optional[int] = n_head UpperCAmelCase_ : List[str] = dff UpperCAmelCase_ : Tuple = resid_pdrop UpperCAmelCase_ : Optional[Any] = embd_pdrop UpperCAmelCase_ : str = layer_norm_epsilon UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : List[str] = use_cache super().__init__(**lowercase_ )
23
1
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
"""simple docstring""" def __a ( __lowerCamelCase ): assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0""" raise ValueError(__lowerCamelCase ) else: UpperCAmelCase_ : List[str] = sylvester(number - 1 ) UpperCAmelCase_ : List[str] = num - 1 UpperCAmelCase_ : List[str] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = DPTConfig(embedding_type="hybrid" ) if "large" in checkpoint_url: UpperCAmelCase_ : Dict = 1024 UpperCAmelCase_ : List[str] = 4096 UpperCAmelCase_ : Union[str, Any] = 24 UpperCAmelCase_ : str = 16 UpperCAmelCase_ : Tuple = [5, 11, 17, 23] UpperCAmelCase_ : int = [256, 512, 1024, 1024] UpperCAmelCase_ : Any = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: UpperCAmelCase_ : Optional[int] = 768 UpperCAmelCase_ : List[str] = [1, 1, 1, 0.5] UpperCAmelCase_ : str = [256, 512, 768, 768] UpperCAmelCase_ : str = 150 UpperCAmelCase_ : List[Any] = 16 UpperCAmelCase_ : Optional[Any] = (1, 384, 384) UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : List[str] = "project" if "ade" in checkpoint_url: UpperCAmelCase_ : Any = True UpperCAmelCase_ : Dict = 768 UpperCAmelCase_ : Any = [1, 1, 1, 0.5] UpperCAmelCase_ : Dict = 150 UpperCAmelCase_ : str = 16 UpperCAmelCase_ : Optional[int] = "huggingface/label-files" UpperCAmelCase_ : Dict = "ade20k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ) ), "r" ) ) UpperCAmelCase_ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = [1, 150, 480, 480] return config, expected_shape def __a ( __lowerCamelCase ): UpperCAmelCase_ : Dict = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCAmelCase_ : int = name.replace("pretrained.model", "dpt.encoder" ) if "pretrained.model" in name: UpperCAmelCase_ : int = name.replace("pretrained.model", "dpt.embeddings" ) if "patch_embed" in name: UpperCAmelCase_ : List[Any] = name.replace("patch_embed", "" ) if "pos_embed" in name: UpperCAmelCase_ : int = name.replace("pos_embed", "position_embeddings" ) if "attn.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("attn.proj", "attention.output.dense" ) if "proj" in name and "project" not in name: UpperCAmelCase_ : Tuple = name.replace("proj", "projection" ) if "blocks" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("blocks", "layer" ) if "mlp.fc1" in name: UpperCAmelCase_ : Any = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : Optional[int] = name.replace("mlp.fc2", "output.dense" ) if "norm1" in name and "backbone" not in name: UpperCAmelCase_ : Union[str, Any] = name.replace("norm1", "layernorm_before" ) if "norm2" in name and "backbone" not in name: UpperCAmelCase_ : Tuple = name.replace("norm2", "layernorm_after" ) if "scratch.output_conv" in name: UpperCAmelCase_ : List[Any] = name.replace("scratch.output_conv", "head" ) if "scratch" in name: UpperCAmelCase_ : int = name.replace("scratch", "neck" ) if "layer1_rn" in name: UpperCAmelCase_ : Dict = name.replace("layer1_rn", "convs.0" ) if 
"layer2_rn" in name: UpperCAmelCase_ : Dict = name.replace("layer2_rn", "convs.1" ) if "layer3_rn" in name: UpperCAmelCase_ : List[Any] = name.replace("layer3_rn", "convs.2" ) if "layer4_rn" in name: UpperCAmelCase_ : Tuple = name.replace("layer4_rn", "convs.3" ) if "refinenet" in name: UpperCAmelCase_ : Optional[Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCAmelCase_ : List[Any] = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: UpperCAmelCase_ : List[str] = name.replace("out_conv", "projection" ) if "resConfUnit1" in name: UpperCAmelCase_ : Optional[int] = name.replace("resConfUnit1", "residual_layer1" ) if "resConfUnit2" in name: UpperCAmelCase_ : int = name.replace("resConfUnit2", "residual_layer2" ) if "conv1" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("conv1", "convolution1" ) if "conv2" in name: UpperCAmelCase_ : int = name.replace("conv2", "convolution2" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCAmelCase_ : List[Any] = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" ) if "pretrained.act_postprocess1.4" in name: UpperCAmelCase_ : List[Any] = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" ) if "pretrained.act_postprocess2.3" in name: UpperCAmelCase_ : Any = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" ) if "pretrained.act_postprocess2.4" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" ) if "pretrained.act_postprocess3.3" in name: UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" ) if "pretrained.act_postprocess4.3" in name: UpperCAmelCase_ : Any = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" ) if "pretrained.act_postprocess4.4" in name: UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" ) if "pretrained" in name: UpperCAmelCase_ : Any = name.replace("pretrained", "dpt" ) if "bn" in name: UpperCAmelCase_ : List[str] = name.replace("bn", "batch_norm" ) if "head" in name: UpperCAmelCase_ : List[str] = name.replace("head", "head.head" ) if "encoder.norm" in name: UpperCAmelCase_ : int = name.replace("encoder.norm", "layernorm" ) if "auxlayer" in name: UpperCAmelCase_ : Any = name.replace("auxlayer", "auxiliary_head.head" ) if "backbone" in name: UpperCAmelCase_ : List[Any] = name.replace("backbone", "backbone.bit.encoder" ) if ".." 
in name: UpperCAmelCase_ : Union[str, Any] = name.replace("..", "." ) if "stem.conv" in name: UpperCAmelCase_ : str = name.replace("stem.conv", "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ : Dict = name.replace("blocks", "layers" ) if "convolution" in name and "backbone" in name: UpperCAmelCase_ : str = name.replace("convolution", "conv" ) if "layer" in name and "backbone" in name: UpperCAmelCase_ : Dict = name.replace("layer", "layers" ) if "backbone.bit.encoder.bit" in name: UpperCAmelCase_ : List[str] = name.replace("backbone.bit.encoder.bit", "backbone.bit" ) if "embedder.conv" in name: UpperCAmelCase_ : str = name.replace("embedder.conv", "embedder.convolution" ) if "backbone.bit.encoder.stem.norm" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" ) return name def __a ( __lowerCamelCase, __lowerCamelCase ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : List[Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : List[str] = in_proj_weight[: config.hidden_size, :] UpperCAmelCase_ : Optional[Any] = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : List[Any] = in_proj_bias[-config.hidden_size :] def __a ( ): UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : int = get_dpt_config(__lowerCamelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") UpperCAmelCase_ : Union[str, Any] = torch.load(__lowerCamelCase, map_location="cpu" ) # remove certain keys remove_ignore_keys_(__lowerCamelCase ) # rename keys for key in state_dict.copy().keys(): UpperCAmelCase_ : Optional[Any] = state_dict.pop(__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = val # read in qkv matrices read_in_q_k_v(__lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # Check outputs on an image UpperCAmelCase_ : List[Any] = 480 if "ade" in checkpoint_url else 384 UpperCAmelCase_ : List[str] = DPTImageProcessor(size=__lowerCamelCase ) UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : int = image_processor(__lowerCamelCase, return_tensors="pt" ) # forward pass UpperCAmelCase_ : int = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth if show_prediction: UpperCAmelCase_ : Any = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=__lowerCamelCase, ) .squeeze() .cpu() 
.numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: model.push_to_hub("ybelkada/dpt-hybrid-midas" ) image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) _a = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
23
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCamelCase__ ( self ): """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
23
1
"""simple docstring""" import sys from collections import defaultdict class A_ : '''simple docstring''' def __init__( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = [] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.node_position[vertex] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = pos def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase_ : List[Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase_ : Dict = 2 * start + 1 else: UpperCAmelCase_ : List[str] = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = heap[smallest_child], positions[smallest_child] UpperCAmelCase_ , UpperCAmelCase_ : List[str] = ( heap[start], positions[start], ) UpperCAmelCase_ , UpperCAmelCase_ : int = temp, tempa UpperCAmelCase_ : Any = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , lowercase_ ) self.top_to_bottom(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = position[index] while index != 0: UpperCAmelCase_ : str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCAmelCase_ : Any = heap[parent] UpperCAmelCase_ : Tuple = position[parent] self.set_position(position[parent] , lowercase_ ) else: UpperCAmelCase_ : Optional[int] = val UpperCAmelCase_ : Tuple = temp self.set_position(lowercase_ , lowercase_ ) break UpperCAmelCase_ : Dict = parent else: UpperCAmelCase_ : Optional[Any] = val UpperCAmelCase_ : Tuple = temp self.set_position(lowercase_ , 0 ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = len(lowercase_ ) // 2 - 1 for i in range(lowercase_ , -1 , -1 ): self.top_to_bottom(lowercase_ , lowercase_ , len(lowercase_ ) , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = positions[0] UpperCAmelCase_ : Dict = sys.maxsize self.top_to_bottom(lowercase_ , 0 , len(lowercase_ ) , lowercase_ ) return temp def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = Heap() UpperCAmelCase_ : List[str] = [0] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [-1] * len(__lowerCamelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase_ : Any = [] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase_ : Optional[Any] = [] for vertex in range(len(__lowerCamelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__lowerCamelCase ) heap.node_position.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Tuple = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = distance heap.heapify(__lowerCamelCase, __lowerCamelCase ) for _ in range(1, len(__lowerCamelCase ) ): UpperCAmelCase_ : List[str] = heap.delete_minimum(__lowerCamelCase, __lowerCamelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase_ : Optional[Any] 
= 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__lowerCamelCase )] ): UpperCAmelCase_ : Any = distance heap.bottom_to_top( __lowerCamelCase, heap.get_position(__lowerCamelCase ), __lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Tuple = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _a = int(input('Enter number of edges: ').strip()) _a = defaultdict(list) for _ in range(edges_number): _a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
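For comparison with the hand-rolled indexed heap above, a sketch of Prim's algorithm on the same adjacency-list format ([neighbor, weight] pairs) using the standard-library heapq; the tiny demo graph is made up for illustration.

import heapq
from collections import defaultdict


def prim(adjacency_list, start=0):
    """Return the MST edges of a connected, undirected weighted graph."""
    visited = {start}
    edges = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(edges)
    tree = []
    while edges:
        weight, u, v = heapq.heappop(edges)
        if v in visited:
            continue
        visited.add(v)
        tree.append((u, v))
        for nxt, w in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(edges, (w, v, nxt))
    return tree


graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (2, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prim(graph))  # [(0, 1), (1, 2), (2, 3)]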
23
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small" UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase_ : List[str] = "en_speaker_1" UpperCAmelCase_ : Tuple = "This is a test string" UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json" UpperCAmelCase_ : Any = "speaker_embeddings" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCAmelCase_ : int = 35 UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : List[Any] = 8 UpperCAmelCase_ : Optional[Any] = { "semantic_prompt": np.ones(lowercase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" ) np.savez(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : 
Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) UpperCAmelCase_ : Tuple = processor(text=self.input_string ) UpperCAmelCase_ : Union[str, Any] = tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
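# A minimal, self-contained sketch of the voice_preset format exercised by the test
# above: a plain dict of three numpy arrays that round-trips through an .npz file
# exactly as the test does. Shapes are assumptions mirroring the test's constants
# (seq_len=35, 2 coarse codebooks, 8 total codebooks).
import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),      # 1-D array, one entry per semantic token
    "coarse_prompt": np.ones((2, 35)),   # (nb_codebooks_coarse, seq_len)
    "fine_prompt": np.ones((8, 35)),     # (nb_codebooks_total, seq_len)
}
np.savez("file.npz", **voice_preset)
with np.load("file.npz") as data:
    loaded = {key: data[key] for key in data.files}
assert all(np.array_equal(voice_preset[key], loaded[key]) for key in voice_preset)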
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=1 ): """simple docstring""" UpperCAmelCase_ : Any = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Tuple = len(lowercase_ ) if n_tasks is None else n_tasks UpperCAmelCase_ : str = n_copies def __iter__( self ): """simple docstring""" UpperCAmelCase_ : Tuple = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) UpperCAmelCase_ : Dict = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = start_length UpperCAmelCase_ : int = eof_strings UpperCAmelCase_ : Optional[int] = tokenizer def __call__( self , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : List[str] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(lowercase_ ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = re.split("(%s)" % "|".join(__lowerCamelCase ), __lowerCamelCase ) # last string should be "" return "".join(string_list[:-2] ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=20, **__lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = defaultdict(__lowerCamelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__lowerCamelCase ) ): with torch.no_grad(): UpperCAmelCase_ : Optional[int] = batch["ids"].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(__lowerCamelCase ).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=__lowerCamelCase, **__lowerCamelCase ) # each task is generated batch_size times UpperCAmelCase_ : Tuple = batch["task_id"].repeat(__lowerCamelCase ) UpperCAmelCase_ : Tuple = accelerator.pad_across_processes( __lowerCamelCase, dim=1, pad_index=tokenizer.pad_token_id ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : List[Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Tuple = generated_tasks.cpu().numpy() for task, generated_tokens in zip(__lowerCamelCase, __lowerCamelCase ): gen_token_dict[task].append(__lowerCamelCase ) UpperCAmelCase_ : str = [[] for _ in range(__lowerCamelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: 
UpperCAmelCase_ : Any = tokenizer.decode(__lowerCamelCase, skip_special_tokens=__lowerCamelCase, clean_up_tokenization_spaces=__lowerCamelCase ) code_gens[task].append(remove_last_block(__lowerCamelCase ) ) return code_gens def __a ( ): # Setup configuration UpperCAmelCase_ : List[Any] = HfArgumentParser(__lowerCamelCase ) UpperCAmelCase_ : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Tuple = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : Dict = "false" if args.num_workers is None: UpperCAmelCase_ : List[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : List[str] = Accelerator() set_seed(args.seed, device_specific=__lowerCamelCase ) # Load model and tokenizer UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Union[str, Any] = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : Optional[Any] = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, __lowerCamelCase, __lowerCamelCase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Union[str, Any] = load_dataset("openai_humaneval" ) UpperCAmelCase_ : Any = load_metric("code_eval" ) UpperCAmelCase_ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : Union[str, Any] = TokenizedDataset(__lowerCamelCase, human_eval["test"], n_copies=__lowerCamelCase, n_tasks=__lowerCamelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : List[Any] = DataLoader(__lowerCamelCase, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Dict = code_eval_metric.compute(references=[""], predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Tuple = complete_code( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, n_tasks=__lowerCamelCase, batch_size=args.batch_size, **__lowerCamelCase, ) if accelerator.is_main_process: UpperCAmelCase_ : List[str] = [] for task in tqdm(range(__lowerCamelCase ) ): UpperCAmelCase_ : Union[str, Any] = human_eval["test"][task]["test"] UpperCAmelCase_ : Tuple = f"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_ , UpperCAmelCase_ : Dict = code_eval_metric.compute( references=__lowerCamelCase, predictions=__lowerCamelCase, num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, "w" ) as fp: json.dump(__lowerCamelCase, __lowerCamelCase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
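# A minimal sketch of the truncation rule used by remove_last_block above
# (the `_a` list holds the end-of-function markers): re.split with a capturing
# group keeps the matched marker as its own element, so dropping the last two
# elements removes the marker and everything generated after it.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
generation = "    return a + b\n\nclass SomethingElse:\n    pass"
parts = re.split("(%s)" % "|".join(EOF_STRINGS), generation)
# parts == ['    return a + b\n', '\nclass', ' SomethingElse:\n    pass']
assert "".join(parts[:-2]) == "    return a + b\n"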
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Tuple = val def __a ( ): UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = DeiTConfig() # all deit models have fine-tuned heads UpperCAmelCase_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Tuple = 1000 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : Any = int(deit_name[-6:-4] ) UpperCAmelCase_ : Dict = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCAmelCase_ : Any = 192 UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : Union[str, Any] = 12 UpperCAmelCase_ : int = 3 elif deit_name[9:].startswith("small" ): UpperCAmelCase_ : List[str] = 384 UpperCAmelCase_ : List[str] = 1536 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : Any = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCAmelCase_ : int = 1024 UpperCAmelCase_ : List[Any] = 4096 UpperCAmelCase_ : Optional[int] = 24 UpperCAmelCase_ : int = 16 # load original model from timm UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Optional[Any] = timm_model.state_dict() UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCAmelCase_ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size ) UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : int = encoding["pixel_values"] UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
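# A minimal sketch of the q/k/v split performed by read_in_q_k_v above: timm
# stores attention as one fused (3 * hidden_size, hidden_size) projection, and
# the HF model takes the three row blocks separately. Toy hidden_size below.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
# stacking the blocks back reproduces the fused matrix exactly
assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)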
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def __a ( __lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def __a ( __lowerCamelCase ): return (gray > 127) & (gray <= 255) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = np.zeros_like(__lowerCamelCase ) UpperCAmelCase_ : Any = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image UpperCAmelCase_ : Any = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): UpperCAmelCase_ : Union[str, Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() UpperCAmelCase_ : Tuple = int(summation > 0 ) return output if __name__ == "__main__": # read original image _a = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' _a = np.array(Image.open(lena_path)) # kernel to be applied _a = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) _a = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image _a = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ ) UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )] UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ ) UpperCAmelCase_ : Tuple = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Tuple = jax.device_count() UpperCAmelCase_ : Optional[int] = num_samples * [prompt] UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowercase_ ) == num_samples def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ ) UpperCAmelCase_ : Optional[int] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Union[str, Any] = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[str] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) 
UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ ) UpperCAmelCase_ : Any = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : str = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Dict = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) UpperCAmelCase_ : List[Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Optional[int] = 50 UpperCAmelCase_ : Optional[int] = jax.device_count() UpperCAmelCase_ : str = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , ) UpperCAmelCase_ : List[Any] = scheduler.create_state() UpperCAmelCase_ : int = scheduler_state UpperCAmelCase_ : Union[str, Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm 
lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : int = 50 UpperCAmelCase_ : str = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , ) UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1] # With memory efficient attention UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , ) UpperCAmelCase_ : str = replicate(lowercase_ ) UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
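# A minimal sketch of the replicate/shard pattern these tests repeat: params are
# copied to every device with replicate(), while batched inputs gain a leading
# device axis of size jax.local_device_count() so a pmapped pipeline can consume
# them. Single-host setup assumed; shard() reshapes (B, ...) -> (n_dev, B/n_dev, ...).
import jax
import numpy as np
from flax.training.common_utils import shard

n_dev = jax.local_device_count()
prompt_ids = np.zeros((n_dev * 2, 77), dtype=np.int32)  # e.g. tokenized prompts
sharded = shard(prompt_ids)
assert sharded.shape == (n_dev, 2, 77)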
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'spiece.model'} _a = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<sep>" , lowercase_="<pad>" , lowercase_="<cls>" , lowercase_="<mask>" , lowercase_=["<eop>", "<eod>"] , lowercase_ = None , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Any = do_lower_case UpperCAmelCase_ : List[Any] = remove_space UpperCAmelCase_ : Optional[int] = keep_accents UpperCAmelCase_ : List[str] = vocab_file UpperCAmelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) UpperCAmelCase_ : int = jieba UpperCAmelCase_ : List[Any] = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def UpperCamelCase__ ( self ): """simple docstring""" return len(self.sp_model ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.__dict__.copy() UpperCAmelCase_ : List[Any] = None return state def __setstate__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCAmelCase_ : Any = {} UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if self.remove_space: UpperCAmelCase_ : Any = " ".join(inputs.strip().split() ) else: UpperCAmelCase_ : Union[str, Any] = inputs UpperCAmelCase_ : str = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: UpperCAmelCase_ : int = unicodedata.normalize("NFKD" , lowercase_ ) UpperCAmelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(lowercase_ )] ) if self.do_lower_case: UpperCAmelCase_ : List[Any] = outputs.lower() return outputs def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.preprocess_text(lowercase_ ) UpperCAmelCase_ : List[str] = self.sp_model.encode(lowercase_ , out_type=lowercase_ ) UpperCAmelCase_ : Optional[Any] = [] for piece in pieces: if len(lowercase_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase_ : List[str] = cur_pieces[1:] else: UpperCAmelCase_ : Dict = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowercase_ ) else: new_pieces.append(lowercase_ ) return new_pieces def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.sp_model.PieceToId(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.sp_model.IdToPiece(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = "".join(lowercase_ ).replace(lowercase_ , " " ).strip() return out_string def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1, 1] return ([0] * len(lowercase_ )) + [1, 1] def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [2] if token_ids_a is None: 
return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , "wb" ) as fi: UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = super()._decode(*lowercase_ , **lowercase_ ) UpperCAmelCase_ : Dict = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
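# A minimal sketch of the whitespace round-trip this tokenizer relies on: spaces
# and newlines become the placeholder glyphs \u2582/\u2583 via the translation
# table built in __init__, and _decode above maps them back.
translator = str.maketrans(" \n", "\u2582\u2583")
text = "你好 世界\n再见"
encoded = text.translate(translator)
assert " " not in encoded and "\n" not in encoded
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == text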
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _a = logging.get_logger(__name__) class A_ : '''simple docstring''' SCREAMING_SNAKE_CASE__ : str SCREAMING_SNAKE_CASE__ : str = None @staticmethod def UpperCamelCase__ ( ): """simple docstring""" raise NotImplementedError def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" raise NotImplementedError def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" raise NotImplementedError def UpperCamelCase__ ( self ): """simple docstring""" if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" return F"""`pip install {cls.pip_package or cls.name}`""" class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = """optuna""" @staticmethod def UpperCamelCase__ ( ): """simple docstring""" return is_optuna_available() def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" return run_hp_search_optuna(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return default_hp_space_optuna(lowercase_ ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = """ray""" SCREAMING_SNAKE_CASE__ : List[Any] = """'ray[tune]'""" @staticmethod def UpperCamelCase__ ( ): """simple docstring""" return is_ray_available() def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" return run_hp_search_ray(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return default_hp_space_ray(lowercase_ ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = """sigopt""" @staticmethod def UpperCamelCase__ ( ): """simple docstring""" return is_sigopt_available() def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" return run_hp_search_sigopt(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return default_hp_space_sigopt(lowercase_ ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = """wandb""" @staticmethod def UpperCamelCase__ ( ): """simple docstring""" return is_wandb_available() def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" return run_hp_search_wandb(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return default_hp_space_wandb(lowercase_ ) _a = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __a ( ): UpperCAmelCase_ : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowerCamelCase ) > 0: UpperCAmelCase_ : Tuple = available_backends[0].name if len(__lowerCamelCase ) > 1: 
logger.info( f"""{len(__lowerCamelCase )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
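# A minimal sketch of the selection rule the final helper above implements: walk
# the backend registry in order and return the first backend whose library is
# importable. The registry below is a placeholder, not the real one.
ALL_BACKENDS = {"optuna": False, "ray": True, "sigopt": False, "wandb": True}

def pick_default_backend() -> str:
    available = [name for name, ok in ALL_BACKENDS.items() if ok]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0]  # first hit wins; any extras are merely logged

assert pick_default_backend() == "ray"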
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in 
zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alphas with inference steps as power of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
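# A minimal sketch of the "linear" beta schedule named in the scheduler config
# above. The even spacing between beta_start and beta_end is an assumption
# matching the standard linear DDPM schedule used by diffusers.
import torch

num_train_timesteps, beta_start, beta_end = 1000, 0.0001, 0.02
betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # noise level accumulates over time
assert betas.shape == (num_train_timesteps,) and alphas_cumprod[-1] < alphas_cumprod[0]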
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A_ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.dummy_uncond_unet UpperCAmelCase_ : Union[str, Any] = PNDMScheduler() UpperCAmelCase_ : Any = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ ) pndm.to(lowercase_ ) pndm.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" , return_dict=lowercase_ )[0] UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Tuple = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = "google/ddpm-cifar10-32" UpperCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : List[str] = PNDMScheduler() UpperCAmelCase_ : Optional[int] = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ ) pndm.to(lowercase_ ) pndm.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pndm(generator=lowercase_ , output_type="numpy" ).images UpperCAmelCase_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _a = object() # For specifying empty leaf dict `{}` _a = object() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ): UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )] if matches and all(__lowerCamelCase ): return True return False def __a ( __lowerCamelCase ): def replace(__lowerCamelCase, __lowerCamelCase ): for rule, replacement in rules: if _match(__lowerCamelCase, __lowerCamelCase ): return replacement return val return replace def __a ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )), (("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )), (("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = _get_partition_rules() UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase ) UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )} UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCamelCase ) )
23
1
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = DownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : List[Any] = """down""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ResnetDownsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : int = """down""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = AttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : List[str] = """down""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = CrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : List[str] = """down""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Dict = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 32 return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = SimpleCrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Dict = """down""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : Dict = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = SkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Optional[Any] = """down""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_skip_sample=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = AttnSkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : int = """down""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_skip_sample=lowercase_ 
) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = DownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Any = """down""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_temb=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = { "in_channels": 32, "out_channels": 32, } UpperCAmelCase_ : Tuple = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = AttnDownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Tuple = """down""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_temb=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = { "in_channels": 32, "out_channels": 32, } UpperCAmelCase_ : Union[str, Any] = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNetMidBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : str = """mid""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = { "in_channels": 32, "temb_channels": 128, } UpperCAmelCase_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = UNetMidBlockaDCrossAttn # noqa F405 SCREAMING_SNAKE_CASE__ : Dict = """mid""" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : Optional[Any] = 32 return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = UNetMidBlockaDSimpleCrossAttn # noqa F405 SCREAMING_SNAKE_CASE__ : Union[str, Any] = """mid""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : Any = 32 return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(lowercase_ ) 
class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = UpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Optional[int] = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = ResnetUpsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : List[str] = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = CrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Tuple = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : int = 32 return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = SimpleCrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Optional[int] = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ , include_encoder_hidden_states=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = 32 return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = AttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Dict = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = SkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : str = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" 
UpperCAmelCase_ : List[str] = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = AttnSkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Optional[Any] = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = UpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : Tuple = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_temb=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = {"in_channels": 32, "out_channels": 32} UpperCAmelCase_ : List[Any] = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(lowercase_ ) class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = AttnUpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE__ : str = """up""" @property def UpperCamelCase__ ( self ): """simple docstring""" return super().get_dummy_input(include_temb=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = {"in_channels": 32, "out_channels": 32} UpperCAmelCase_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(lowercase_ )
23
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , 
lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
23
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
23
1
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A_ (lowercase__ ,lowercase__ ,lowercase__ ): '''simple docstring''' @register_to_config def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = False , ): """simple docstring""" super().__init__() UpperCAmelCase_ : int = nn.Embedding(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = nn.Embedding(lowercase_ , lowercase_ ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : Tuple = nn.Dropout(p=lowercase_ ) UpperCAmelCase_ : Optional[int] = TaConfig( vocab_size=lowercase_ , d_model=lowercase_ , num_heads=lowercase_ , d_kv=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , feed_forward_proj=lowercase_ , is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , ) UpperCAmelCase_ : int = nn.ModuleList() for lyr_num in range(lowercase_ ): UpperCAmelCase_ : int = TaBlock(lowercase_ ) self.encoders.append(lowercase_ ) UpperCAmelCase_ : Optional[int] = TaLayerNorm(lowercase_ ) UpperCAmelCase_ : Optional[int] = nn.Dropout(p=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = self.token_embedder(lowercase_ ) UpperCAmelCase_ : List[Any] = encoder_input_tokens.shape[1] UpperCAmelCase_ : str = torch.arange(lowercase_ , device=encoder_input_tokens.device ) x += self.position_encoding(lowercase_ ) UpperCAmelCase_ : Optional[Any] = self.dropout_pre(lowercase_ ) # inverted the attention mask UpperCAmelCase_ : str = encoder_input_tokens.size() UpperCAmelCase_ : Optional[int] = self.get_extended_attention_mask(lowercase_ , lowercase_ ) for lyr in self.encoders: UpperCAmelCase_ : Any = lyr(lowercase_ , lowercase_ )[0] UpperCAmelCase_ : List[str] = self.layer_norm(lowercase_ ) return self.dropout_post(lowercase_ ), encoder_inputs_mask
23
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
1
"""simple docstring""" from jiwer import compute_measures import datasets _a = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' _a = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' _a = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def UpperCamelCase__ ( self , lowercase_=None , lowercase_=None , lowercase_=False ): """simple docstring""" if concatenate_texts: return compute_measures(lowercase_ , lowercase_ )["wer"] else: UpperCAmelCase_ : str = 0 UpperCAmelCase_ : int = 0 for prediction, reference in zip(lowercase_ , lowercase_ ): UpperCAmelCase_ : Union[str, Any] = compute_measures(lowercase_ , lowercase_ ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += 
measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
23
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
23
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json', # See all ViT models at https://huggingface.co/models?filter=vit } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = """vit""" def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=224 , lowercase_=16 , lowercase_=3 , lowercase_=True , lowercase_=16 , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : List[Any] = layer_norm_eps UpperCAmelCase_ : List[Any] = image_size UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Optional[int] = qkv_bias UpperCAmelCase_ : List[str] = encoder_stride class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-4
23
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
1
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class A_ (unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = seq_length UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : str = use_attention_mask UpperCAmelCase_ : List[Any] = use_token_type_ids UpperCAmelCase_ : List[Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : Optional[int] = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : str = num_choices def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = None if self.use_attention_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : List[str] = None if self.use_token_type_ids: UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : List[Any] = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = config_and_inputs UpperCAmelCase_ 
: int = True UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = True SCREAMING_SNAKE_CASE__ : Dict = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = FlaxRobertaModelTester(self ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCAmelCase_ : str = model_class_name.from_pretrained("roberta-base" , from_pt=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase_ )
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _a = { 'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'], 'tokenization_lxmert': ['LxmertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['LxmertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder', 'LxmertXLayer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : str = latents.to(lowercase_ ) UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : str = self._execution_device UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 ) UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", 
"learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5 UpperCAmelCase_ : int = image.clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
1
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename _a = 'http://www.mocksite.com/file1.txt' _a = '"text": ["foo", "foo"]' _a = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class A_ : '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = 200 SCREAMING_SNAKE_CASE__ : Dict = {"""Content-Length""": """100"""} SCREAMING_SNAKE_CASE__ : Dict = {} def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" return [bytes(lowercase_ , "utf-8" )] def __a ( *__lowerCamelCase, **__lowerCamelCase ): return MockResponse() @pytest.mark.parametrize("urls_type", [str, list, dict] ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): import requests monkeypatch.setattr(__lowerCamelCase, "request", __lowerCamelCase ) UpperCAmelCase_ : Tuple = URL if issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = url elif issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = [url] elif issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = {"train": url} UpperCAmelCase_ : Union[str, Any] = "dummy" UpperCAmelCase_ : Optional[Any] = "downloads" UpperCAmelCase_ : Optional[int] = tmp_path UpperCAmelCase_ : List[str] = DownloadConfig( cache_dir=os.path.join(__lowerCamelCase, __lowerCamelCase ), use_etag=__lowerCamelCase, ) UpperCAmelCase_ : List[Any] = DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase ) UpperCAmelCase_ : List[Any] = dl_manager.download(__lowerCamelCase ) UpperCAmelCase_ : List[Any] = urls for downloaded_paths in [downloaded_paths]: if isinstance(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : str = [downloaded_paths] UpperCAmelCase_ : int = [urls] elif isinstance(__lowerCamelCase, __lowerCamelCase ): assert "train" in downloaded_paths.keys() UpperCAmelCase_ : Tuple = downloaded_paths.values() UpperCAmelCase_ : str = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__lowerCamelCase, __lowerCamelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] UpperCAmelCase_ : List[str] = Path(__lowerCamelCase ) UpperCAmelCase_ : List[str] = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() UpperCAmelCase_ : Union[str, Any] = downloaded_path.read_text() assert content == CONTENT UpperCAmelCase_ : str = downloaded_path.with_suffix(".json" ) assert metadata_downloaded_path.exists() UpperCAmelCase_ : Dict = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("paths_type", [str, list, dict] ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Dict = str(__lowerCamelCase ) if issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = filename elif issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = [filename] elif issubclass(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = {"train": filename} UpperCAmelCase_ : Optional[int] = "dummy" UpperCAmelCase_ : List[Any] = xz_file.parent UpperCAmelCase_ : List[Any] = "extracted" UpperCAmelCase_ : Tuple = DownloadConfig( cache_dir=__lowerCamelCase, use_etag=__lowerCamelCase, ) UpperCAmelCase_ : Any = 
DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase ) UpperCAmelCase_ : List[str] = dl_manager.extract(__lowerCamelCase ) UpperCAmelCase_ : Dict = paths for extracted_paths in [extracted_paths]: if isinstance(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = [extracted_paths] UpperCAmelCase_ : str = [paths] elif isinstance(__lowerCamelCase, __lowerCamelCase ): assert "train" in extracted_paths.keys() UpperCAmelCase_ : str = extracted_paths.values() UpperCAmelCase_ : Tuple = paths.values() assert extracted_paths for extracted_path, input_path in zip(__lowerCamelCase, __lowerCamelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] UpperCAmelCase_ : Union[str, Any] = Path(__lowerCamelCase ) UpperCAmelCase_ : List[str] = extracted_path.parts assert parts[-1] == hash_url_to_filename(__lowerCamelCase, etag=__lowerCamelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() UpperCAmelCase_ : List[str] = extracted_path.read_text() UpperCAmelCase_ : str = text_file.read_text() assert extracted_file_content == expected_file_content def __a ( __lowerCamelCase, __lowerCamelCase ): assert path.endswith(".jsonl" ) for num_items, line in enumerate(__lowerCamelCase, start=1 ): UpperCAmelCase_ : Any = json.loads(line.decode("utf-8" ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"] ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = request.getfixturevalue(__lowerCamelCase ) UpperCAmelCase_ : Dict = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ): _test_jsonl(__lowerCamelCase, __lowerCamelCase ) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = request.getfixturevalue(__lowerCamelCase ) UpperCAmelCase_ : int = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ): _test_jsonl(__lowerCamelCase, __lowerCamelCase ) assert num_tar == 1 assert num_jsonl == 2 def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[Any] = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ), start=1 ): assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
23
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
23
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'sentencepiece.model'} _a = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _a = { 'google/rembert': 256, } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_="[CLS]" , lowercase_="[SEP]" , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , **lowercase_ , ): """simple docstring""" super().__init__( do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , ) UpperCAmelCase_ : int = do_lower_case UpperCAmelCase_ : Tuple = remove_space UpperCAmelCase_ : str = keep_accents UpperCAmelCase_ : Any = vocab_file UpperCAmelCase_ : Any = spm.SentencePieceProcessor() self.sp_model.Load(lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.sp_model ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = d UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=False ): """simple docstring""" UpperCAmelCase_ : Tuple = self.sp_model.EncodeAsPieces(lowercase_ ) return pieces def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.sp_model.PieceToId(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.sp_model.IdToPiece(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = self.sp_model.decode_pieces(lowercase_ ) return out_string def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : List[Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1] def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[Any] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
23
"""simple docstring""" _a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _a = [None] * 10_000_000 _a = True _a = False def __a ( __lowerCamelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) ) UpperCAmelCase_ : List[str] = number_chain while number < 1000_0000: UpperCAmelCase_ : List[Any] = number_chain number *= 10 return number_chain def __a ( __lowerCamelCase = 1000_0000 ): for i in range(1, __lowerCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
23
1
"""simple docstring""" from __future__ import annotations def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = get_failure_array(__lowerCamelCase ) # 2) Step through text searching for pattern UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 0, 0 # index into text, pattern while i < len(__lowerCamelCase ): if pattern[j] == text[i]: if j == (len(__lowerCamelCase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: UpperCAmelCase_ : str = failure[j - 1] continue i += 1 return False def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = [0] UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : Dict = 1 while j < len(__lowerCamelCase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: UpperCAmelCase_ : Any = failure[i - 1] continue j += 1 failure.append(__lowerCamelCase ) return failure if __name__ == "__main__": # Test 1) _a = 'abc1abc12' _a = 'alskfjaldsabc1abc1abc12k23adsfabcabc' _a = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) _a = 'ABABX' _a = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) _a = 'AAAB' _a = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) _a = 'abcdabcy' _a = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) _a = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
23
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Return True if there is node that has not iterated. UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [] queue.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = True while queue: UpperCAmelCase_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) UpperCAmelCase_ : Any = True UpperCAmelCase_ : Union[str, Any] = u return visited[t] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # This array is filled by BFS and to store path UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase )) UpperCAmelCase_ : Any = 0 while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = float("Inf" ) UpperCAmelCase_ : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] ) UpperCAmelCase_ : Dict = parent[s] max_flow += path_flow UpperCAmelCase_ : Optional[Any] = sink while v != source: UpperCAmelCase_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase_ : Optional[int] = parent[v] return max_flow _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _a , _a = 0, 5 print(ford_fulkerson(graph, source, sink))
23
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = data def __iter__( self ): """simple docstring""" for element in self.data: yield element def __a ( __lowerCamelCase=True ): UpperCAmelCase_ : List[str] = Accelerator(even_batches=__lowerCamelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = False ): if iterable: UpperCAmelCase_ : Any = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) ) else: UpperCAmelCase_ : Dict = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) ) UpperCAmelCase_ : Union[str, Any] = DataLoader(__lowerCamelCase, batch_size=__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(__lowerCamelCase ) return dl def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ): UpperCAmelCase_ : int = create_dataloader(accelerator=__lowerCamelCase, dataset_size=__lowerCamelCase, batch_size=__lowerCamelCase ) UpperCAmelCase_ : List[Any] = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __a ( ): UpperCAmelCase_ : str = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __lowerCamelCase, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __lowerCamelCase, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], ) def __a ( ): UpperCAmelCase_ : str = create_accelerator(even_batches=__lowerCamelCase ) verify_dataloader_batch_sizes( __lowerCamelCase, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], ) verify_dataloader_batch_sizes( __lowerCamelCase, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], ) def __a ( ): UpperCAmelCase_ : Optional[Any] = create_accelerator(even_batches=__lowerCamelCase ) UpperCAmelCase_ : str = torch.nn.Linear(1, 1 ) UpperCAmelCase_ : Any = accelerator.prepare(__lowerCamelCase ) UpperCAmelCase_ : List[Any] = create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1 ) UpperCAmelCase_ : Tuple = [] with 
accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__lowerCamelCase ): UpperCAmelCase_ : Any = ddp_model(batch[0].float() ) UpperCAmelCase_ : Any = output.sum() loss.backward() batch_idxs.append(__lowerCamelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __a ( __lowerCamelCase ): with warnings.catch_warnings(record=__lowerCamelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category, __lowerCamelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def __a ( ): UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Optional[int] = False UpperCAmelCase_ : Tuple = create_accelerator(even_batches=__lowerCamelCase ) UpperCAmelCase_ : str = torch.nn.Linear(1, 1 ) UpperCAmelCase_ : List[Any] = accelerator.prepare(__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1 ) UpperCAmelCase_ : int = create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model], even_batches=__lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = train_dl.batch_sampler.even_batches UpperCAmelCase_ : Union[str, Any] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __a ( ): UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Tuple = create_accelerator(even_batches=__lowerCamelCase ) UpperCAmelCase_ : List[Any] = torch.nn.Linear(1, 1 ) UpperCAmelCase_ : List[str] = accelerator.prepare(__lowerCamelCase ) create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1, iterable=__lowerCamelCase ) UpperCAmelCase_ : Tuple = create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model], even_batches=__lowerCamelCase ): UpperCAmelCase_ : List[str] = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __a ( ): UpperCAmelCase_ : int = create_accelerator() UpperCAmelCase_ : Union[str, Any] = torch.nn.Linear(1, 1 ) UpperCAmelCase_ : Tuple = accelerator.prepare(__lowerCamelCase ) create_dataloader(__lowerCamelCase, dataset_size=3, batch_size=1, iterable=__lowerCamelCase ) with warnings.catch_warnings(record=__lowerCamelCase ) as w: with accelerator.join_uneven_inputs([ddp_model], even_batches=__lowerCamelCase ): pass assert issubclass(w[-1].category, __lowerCamelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def __a ( ): UpperCAmelCase_ : List[Any] = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) 
test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) UpperCAmelCase_ : str = accelerator.state.distributed_type UpperCAmelCase_ : Dict = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase ) UpperCAmelCase_ : str = original_state if __name__ == "__main__": main()
23
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
1
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class A_ (lowercase__ ,lowercase__ ): '''simple docstring''' @register_to_config def __init__( self , lowercase_ = 128 , lowercase_ = 256 , lowercase_ = 20_00.0 , lowercase_ = 768 , lowercase_ = 12 , lowercase_ = 12 , lowercase_ = 64 , lowercase_ = 2048 , lowercase_ = 0.1 , ): """simple docstring""" super().__init__() UpperCAmelCase_ : Dict = nn.Sequential( nn.Linear(lowercase_ , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , ) UpperCAmelCase_ : Tuple = nn.Embedding(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Tuple = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) UpperCAmelCase_ : List[Any] = nn.Dropout(p=lowercase_ ) UpperCAmelCase_ : Optional[Any] = nn.ModuleList() for lyr_num in range(lowercase_ ): # FiLM conditional T5 decoder UpperCAmelCase_ : List[Any] = DecoderLayer(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ ) self.decoders.append(lowercase_ ) UpperCAmelCase_ : Optional[int] = TaLayerNorm(lowercase_ ) UpperCAmelCase_ : Optional[int] = nn.Dropout(p=lowercase_ ) UpperCAmelCase_ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. UpperCAmelCase_ : str = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) UpperCAmelCase_ : List[Any] = self.conditioning_emb(lowercase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) UpperCAmelCase_ : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. UpperCAmelCase_ : Tuple = torch.broadcast_to( torch.arange(lowercase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) UpperCAmelCase_ : Union[str, Any] = self.position_encoding(lowercase_ ) UpperCAmelCase_ : str = self.continuous_inputs_projection(lowercase_ ) inputs += position_encodings UpperCAmelCase_ : Union[str, Any] = self.dropout(lowercase_ ) # decoder: No padding present. UpperCAmelCase_ : int = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
UpperCAmelCase_ : Union[str, Any] = [(x, self.encoder_decoder_mask(lowercase_ , lowercase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings UpperCAmelCase_ : List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) UpperCAmelCase_ : Optional[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: UpperCAmelCase_ : List[str] = lyr( lowercase_ , conditioning_emb=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )[0] UpperCAmelCase_ : str = self.decoder_norm(lowercase_ ) UpperCAmelCase_ : str = self.post_dropout(lowercase_ ) UpperCAmelCase_ : int = self.spec_out(lowercase_ ) return spec_out class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1E-6 ): """simple docstring""" super().__init__() UpperCAmelCase_ : List[Any] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ ) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.layer[0]( lowercase_ , conditioning_emb=lowercase_ , attention_mask=lowercase_ , ) if encoder_hidden_states is not None: UpperCAmelCase_ : Union[str, Any] = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to( encoder_hidden_states.dtype ) UpperCAmelCase_ : Any = self.layer[1]( lowercase_ , key_value_states=lowercase_ , attention_mask=lowercase_ , ) # Apply Film Conditional Feed Forward layer UpperCAmelCase_ : Union[str, Any] = self.layer[-1](lowercase_ , lowercase_ ) return (hidden_states,) class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : Union[str, Any] = TaLayerNorm(lowercase_ ) UpperCAmelCase_ : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ ) UpperCAmelCase_ : int = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ ) UpperCAmelCase_ : str = nn.Dropout(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , ): """simple docstring""" # pre_self_attention_layer_norm UpperCAmelCase_ : List[Any] = self.layer_norm(lowercase_ ) if conditioning_emb is not None: UpperCAmelCase_ : List[Any] = self.FiLMLayer(lowercase_ , lowercase_ ) # Self-attention block UpperCAmelCase_ : List[Any] = self.attention(lowercase_ ) UpperCAmelCase_ : Tuple = hidden_states + self.dropout(lowercase_ ) return hidden_states class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : Dict = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = TaLayerNorm(lowercase_ , eps=lowercase_ ) 
UpperCAmelCase_ : Any = nn.Dropout(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , ): """simple docstring""" UpperCAmelCase_ : Dict = self.layer_norm(lowercase_ ) UpperCAmelCase_ : List[Any] = self.attention( lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=attention_mask.squeeze(1 ) , ) UpperCAmelCase_ : int = hidden_states + self.dropout(lowercase_ ) return layer_output class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[Any] = TaDenseGatedActDense(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ ) UpperCAmelCase_ : int = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ ) UpperCAmelCase_ : List[str] = TaLayerNorm(lowercase_ , eps=lowercase_ ) UpperCAmelCase_ : int = nn.Dropout(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ): """simple docstring""" UpperCAmelCase_ : Dict = self.layer_norm(lowercase_ ) if conditioning_emb is not None: UpperCAmelCase_ : Optional[int] = self.film(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = self.DenseReluDense(lowercase_ ) UpperCAmelCase_ : int = hidden_states + self.dropout(lowercase_ ) return hidden_states class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) UpperCAmelCase_ : List[str] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) UpperCAmelCase_ : List[Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) UpperCAmelCase_ : Tuple = nn.Dropout(lowercase_ ) UpperCAmelCase_ : str = NewGELUActivation() def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = self.act(self.wi_a(lowercase_ ) ) UpperCAmelCase_ : Tuple = self.wi_a(lowercase_ ) UpperCAmelCase_ : Optional[int] = hidden_gelu * hidden_linear UpperCAmelCase_ : Optional[int] = self.dropout(lowercase_ ) UpperCAmelCase_ : Optional[int] = self.wo(lowercase_ ) return hidden_states class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_=1E-6 ): """simple docstring""" super().__init__() UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.ones(lowercase_ ) ) UpperCAmelCase_ : Optional[int] = eps def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 UpperCAmelCase_ : Any = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowercase_ ) UpperCAmelCase_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: UpperCAmelCase_ : List[str] = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class A_ (nn.Module ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowercase_ , 3.0 )) )) class A_ (nn.Module ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" super().__init__() UpperCAmelCase_ : str = nn.Linear(lowercase_ , out_features * 2 , bias=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.scale_bias(lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = torch.chunk(lowercase_ , 2 , -1 ) UpperCAmelCase_ : Tuple = x * (1 + scale) + shift return x
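# --- illustrative addendum (not part of the original file) -------------------
# The final class above implements FiLM conditioning: a bias-free linear layer
# projects the conditioning embedding to per-channel (scale, shift) pairs,
# applied as x * (1 + scale) + shift. A self-contained sketch of that
# computation with assumed toy shapes (the anonymized class names above are
# not directly usable):
_film_proj = nn.Linear(16, 4 * 2, bias=False)  # in_features -> out_features * 2
_x = torch.randn(2, 3, 4)                      # (batch, seq, channels)
_cond = torch.randn(2, 1, 16)                  # conditioning embedding
_scale, _shift = torch.chunk(_film_proj(_cond), 2, dim=-1)
_out = _x * (1 + _scale) + _shift
assert _out.shape == _x.shape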
23
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , 
lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
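# --- illustrative addendum (not part of the original file) -------------------
# Standalone sketch of the right-padding branch of `_pad` above: the feature
# sequence is padded to `max_length` with the padding value and the attention
# mask is extended with zeros (toy values chosen to be exactly representable
# in float32):
_seq = np.array([0.5, 0.25, 0.75], dtype=np.float32)
_mask = np.ones(len(_seq), dtype=np.int32)
_difference = 5 - len(_seq)  # assumed max_length = 5
_padded = np.pad(_seq, (0, _difference), "constant", constant_values=0.0)
_padded_mask = np.pad(_mask, (0, _difference))
assert _padded.tolist() == [0.5, 0.25, 0.75, 0.0, 0.0]
assert _padded_mask.tolist() == [1, 1, 1, 0, 0]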
23
1
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor _a = logging.getLogger(__name__) _a = 50 # max width of layer names _a = 70 # max width of quantizer names def __a ( __lowerCamelCase ): UpperCAmelCase_ : Tuple = parser.add_argument_group("quant_trainer arguments" ) group.add_argument("--wprec", type=__lowerCamelCase, default=8, help="weight precision" ) group.add_argument("--aprec", type=__lowerCamelCase, default=8, help="activation precision" ) group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling" ) group.add_argument("--quant-disable", action="store_true", help="disable all quantizers" ) group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers" ) group.add_argument("--quant-disable-keyword", type=__lowerCamelCase, nargs="+", help="disable quantizers by keyword" ) group.add_argument("--quant-disable-layer-module", type=__lowerCamelCase, help="disable quantizers by keyword under layer." ) group.add_argument("--quant-enable-layer-module", type=__lowerCamelCase, help="enable quantizers by keyword under layer" ) group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use" ) group.add_argument("--percentile", default=__lowerCamelCase, type=__lowerCamelCase, help="percentile for PercentileCalibrator" ) group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv" ) group.add_argument("--clip-gelu", metavar="N", type=__lowerCamelCase, help="clip gelu output maximum value to N" ) group.add_argument( "--recalibrate-weights", action="store_true", help=( "recalibrate weight amaxes by taking the max of the weights." " amaxes will be computed with the current quantization granularity (axis)." ), ) def __a ( __lowerCamelCase ): if args.calibrator == "max": UpperCAmelCase_ : Union[str, Any] = "max" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("Specify --percentile when using percentile calibrator" ) UpperCAmelCase_ : Optional[int] = "histogram" elif args.calibrator == "mse": UpperCAmelCase_ : Union[str, Any] = "histogram" else: raise ValueError(f"""Invalid calibrator {args.calibrator}""" ) UpperCAmelCase_ : str = QuantDescriptor(num_bits=args.aprec, calib_method=__lowerCamelCase ) UpperCAmelCase_ : str = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(__lowerCamelCase ) quant_nn.QuantLinear.set_default_quant_desc_weight(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase=False ): logger.info("Configuring Model for Quantization" ) logger.info(f"""using quantization package {pytorch_quantization.__file__}""" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(__lowerCamelCase, ["embeddings"], which="weight", _disabled=__lowerCamelCase ) if args.quant_disable: set_quantizer_by_name(__lowerCamelCase, [""], _disabled=__lowerCamelCase ) if args.quant_disable_keyword: set_quantizer_by_name(__lowerCamelCase, args.quant_disable_keyword, _disabled=__lowerCamelCase ) if args.quant_disable_layer_module: set_quantizer_by_name(__lowerCamelCase, [r"layer.\d+." 
+ args.quant_disable_layer_module], _disabled=__lowerCamelCase ) if args.quant_enable_layer_module: set_quantizer_by_name(__lowerCamelCase, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=__lowerCamelCase ) if args.recalibrate_weights: recalibrate_weights(__lowerCamelCase ) if args.fuse_qkv: fuse_qkv(__lowerCamelCase, __lowerCamelCase ) if args.clip_gelu: clip_gelu(__lowerCamelCase, args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(__lowerCamelCase ) def __a ( __lowerCamelCase ): logger.info("Enabling Calibration" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"""{name:80}: {module}""" ) def __a ( __lowerCamelCase, __lowerCamelCase ): logger.info("Loading calibrated amax" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: if isinstance(module._calibrator, calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("percentile", percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): def fusea(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): for mod in [qq, qk, qv]: if not hasattr(__lowerCamelCase, "_amax" ): print(" WARNING: NO AMAX BUFFER" ) return UpperCAmelCase_ : Tuple = qq._amax.detach().item() UpperCAmelCase_ : List[Any] = qk._amax.detach().item() UpperCAmelCase_ : Dict = qv._amax.detach().item() UpperCAmelCase_ : Dict = max(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) qq._amax.fill_(__lowerCamelCase ) qk._amax.fill_(__lowerCamelCase ) qv._amax.fill_(__lowerCamelCase ) logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" ) for name, mod in model.named_modules(): if name.endswith(".attention.self" ): logger.info(f"""FUSE_QKV: {name:{name_width}}""" ) fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer ) def __a ( __lowerCamelCase, __lowerCamelCase ): for name, mod in model.named_modules(): if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ): UpperCAmelCase_ : Dict = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item() logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" ) def __a ( __lowerCamelCase ): for name, mod in model.named_modules(): if hasattr(__lowerCamelCase, "_weight_quantizer" ) and mod._weight_quantizer.axis is not None: UpperCAmelCase_ : Optional[Any] = mod.weight.shape[0] UpperCAmelCase_ : int = mod._weight_quantizer._amax.detach() UpperCAmelCase_ : List[Any] = torch.ones(__lowerCamelCase, dtype=amax.dtype, device=amax.device ) * amax print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" ) def __a ( __lowerCamelCase ): for name, mod in model.named_modules(): if hasattr(__lowerCamelCase, "_weight_quantizer" ): if not hasattr(mod.weight_quantizer, "_amax" ): print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" ) continue # determine which axes to reduce across # e.g. 
# (continuation of quantization-aware-training helpers; `re`, `pytorch_quantization`,
# and `logger` are assumed imported at the top of the file. The enclosing
# recalibrate_weights loop around the first statements is reconstructed from context.)


def recalibrate_weights(model):
    # Perform max calibration on the weights and update the quantizer amax buffers.
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            # determine which axes to reduce across,
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1, 2, 3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    # Log, per weighted module, its input (activation) and weight quantizer configs.
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    # Print every TensorQuantizer module found in the model.
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    # Set attribute k to value v on mod.<quantizer>, warning if it does not exist.
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    # Apply each keyword attribute to the input and/or weight quantizer of mod.
    s = f"Warning: changing {which} quantizers of {name}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    # Apply quantizer attributes wherever a module's qualified name matches a regex.
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
23
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 ) UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ ) try: pickle.loads(pickle.dumps(lowercase_ ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
23
1
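The quantizer helpers above all rely on one traversal idiom: walk model.named_modules() and regex-match the qualified module names. A minimal, self-contained sketch of that idiom with plain torch.nn (not the pytorch_quantization API), using a made-up `frozen` attribute in place of the real quantizer fields:

import re

import torch.nn as nn


def set_attr_by_name(model: nn.Module, patterns: list, **kwargs) -> None:
    # Set attributes on every submodule whose qualified name matches a pattern.
    for name, mod in model.named_modules():
        if any(re.search(p, name) for p in patterns):
            for key, value in kwargs.items():
                setattr(mod, key, value)


model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
set_attr_by_name(model, [r"^\d+$"], frozen=True)  # matches the numbered children
print([getattr(m, "frozen", False) for _, m in model.named_modules()])
# -> [False, True, True, True]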
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Return True if there is node that has not iterated. UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [] queue.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = True while queue: UpperCAmelCase_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) UpperCAmelCase_ : Any = True UpperCAmelCase_ : Union[str, Any] = u return visited[t] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # This array is filled by BFS and to store path UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase )) UpperCAmelCase_ : Any = 0 while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = float("Inf" ) UpperCAmelCase_ : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] ) UpperCAmelCase_ : Dict = parent[s] max_flow += path_flow UpperCAmelCase_ : Optional[Any] = sink while v != source: UpperCAmelCase_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase_ : Optional[int] = parent[v] return max_flow _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _a , _a = 0, 5 print(ford_fulkerson(graph, source, sink))
23
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """ctrl""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : List[str] = n_embd UpperCAmelCase_ : Dict = n_layer UpperCAmelCase_ : Optional[int] = n_head UpperCAmelCase_ : List[str] = dff UpperCAmelCase_ : Tuple = resid_pdrop UpperCAmelCase_ : Optional[Any] = embd_pdrop UpperCAmelCase_ : str = layer_norm_epsilon UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : List[str] = use_cache super().__init__(**lowercase_ )
23
1
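A quick sanity check on the max-flow routine above: on a 4-node network with two source-to-sink routes of bottleneck capacity 2 each, the flow should be 4. This assumes the ford_fulkerson defined in that snippet is in scope; note that it consumes the capacity matrix in place.

small_graph = [
    [0, 3, 2, 0],  # 0 (source) -> 1 with cap 3, -> 2 with cap 2
    [0, 0, 0, 2],  # 1 -> 3 (sink) with cap 2
    [0, 0, 0, 3],  # 2 -> 3 (sink) with cap 3
    [0, 0, 0, 0],
]
assert ford_fulkerson(small_graph, 0, 3) == 4  # min(3, 2) + min(2, 3)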
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" def __a ( __lowerCamelCase ): assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0""" raise ValueError(__lowerCamelCase ) else: UpperCAmelCase_ : List[str] = sylvester(number - 1 ) UpperCAmelCase_ : List[str] = num - 1 UpperCAmelCase_ : List[str] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
1
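The recursive step above computes a(n) = a(n-1) * (a(n-1) - 1) + 1 with a(1) = 2, so each Sylvester number is one more than the product of all earlier terms, and the reciprocals 1/2 + 1/3 + 1/7 + ... telescope toward 1. An exact check of that identity with fractions.Fraction (the iterative helper below is a convenience for the check, not part of the original snippet):

from fractions import Fraction


def sylvester_terms(count: int) -> list:
    # Iterative equivalent of the recursive sylvester() above.
    terms, num = [], 2
    for _ in range(count):
        terms.append(num)
        num = num * (num - 1) + 1
    return terms


terms = sylvester_terms(5)
print(terms)  # [2, 3, 7, 43, 1807]
partial_sum = sum(Fraction(1, t) for t in terms)
next_term = terms[-1] * (terms[-1] - 1) + 1
assert partial_sum == 1 - Fraction(1, next_term - 1)  # reciprocals telescope to 1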
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """pytorch""", """script""": """run_ddp.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """tensorflow""", """script""": """run_tf_dist.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7}, }, ] ) class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=lowercase_ , ) assert hasattr(self , "env" ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings UpperCAmelCase_ : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowercase_ , instance_count=lowercase_ , instance_type=self.instance_type , debugger_hook_config=lowercase_ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowercase_ , py_version="py36" , ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" TrainingJobAnalytics(lowercase_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" # create estimator UpperCAmelCase_ : Optional[Any] = self.create_estimator(lowercase_ ) # run training estimator.fit() # result dataframe UpperCAmelCase_ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase_ : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) UpperCAmelCase_ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase_ : Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 ) ) # assert kpis assert train_runtime <= 
self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowercase_ )
23
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCamelCase__ ( self ): """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
23
1
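The get_dummy_inputs helper in the pipeline test above threads a seeded torch.Generator into every tensor draw so repeated runs are bit-identical; that is the whole reproducibility mechanism, shown here in isolation:

import torch

gen = torch.Generator().manual_seed(0)
first = torch.randn(2, 2, generator=gen)

gen = torch.Generator().manual_seed(0)  # re-seeding replays the same stream
second = torch.randn(2, 2, generator=gen)

assert torch.equal(first, second)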
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = tempfile.mkdtemp() UpperCAmelCase_ : str = 8 # DPR tok UpperCAmelCase_ : Dict = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : int = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) UpperCAmelCase_ : Dict = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok UpperCAmelCase_ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase_ : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) UpperCAmelCase_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"} UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) UpperCAmelCase_ : Tuple = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : str = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def UpperCamelCase__ ( self ): """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCamelCase__ ( self ): """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCamelCase__ ( self ): """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , 
string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.get_dummy_dataset() UpperCAmelCase_ : Optional[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase_ : str = dataset UpperCAmelCase_ : Tuple = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = self.get_dummy_dataset() UpperCAmelCase_ : int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , "dataset" ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset UpperCAmelCase_ : Optional[int] = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: UpperCAmelCase_ : Optional[int] = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , ) return retriever def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase_ : int = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) UpperCAmelCase_ : List[Any] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(lowercase_ , open(lowercase_ , "wb" ) ) UpperCAmelCase_ : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) UpperCAmelCase_ : int = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : str = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) 
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase_ : List[str] = self.get_dummy_dataset() retriever.save_pretrained(lowercase_ ) UpperCAmelCase_ : List[str] = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : Any = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) UpperCAmelCase_ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : Optional[int] = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) UpperCAmelCase_ : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowercase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc 
self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) UpperCAmelCase_ : List[Any] = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) UpperCAmelCase_ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : Tuple = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = self.get_dummy_legacy_index_retriever() UpperCAmelCase_ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , lowercase_ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) UpperCAmelCase_ : Tuple = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) UpperCAmelCase_ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : List[Any] = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase__ ( self ): """simple docstring""" import torch UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Dict = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ : Any = [[5, 7], [10, 11]] UpperCAmelCase_ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : str = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , np.ndarray ) UpperCAmelCase_ : List[str] = retriever( lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors="pt" , ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , 
torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer() UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : int = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) retriever.set_ctx_encoder_tokenizer(lowercase_ ) UpperCAmelCase_ : List[Any] = [[5, 7], [10, 11]] UpperCAmelCase_ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ : Dict = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) self.assertEqual( len(lowercase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowercase_ ) # check for doc token related keys in dictionary.
23
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small" UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase_ : List[str] = "en_speaker_1" UpperCAmelCase_ : Tuple = "This is a test string" UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json" UpperCAmelCase_ : Any = "speaker_embeddings" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCAmelCase_ : int = 35 UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : List[Any] = 8 UpperCAmelCase_ : Optional[Any] = { "semantic_prompt": np.ones(lowercase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" ) np.savez(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : 
Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) UpperCAmelCase_ : Tuple = processor(text=self.input_string ) UpperCAmelCase_ : Union[str, Any] = tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
23
1
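The retrieval assertions in the RAG tests above (doc_ids == [[1], [0]], "max inner product is reached with second doc") follow directly from maximum-inner-product search over the two dummy embeddings. The same ordering, reproduced with plain numpy and no faiss index:

import numpy as np

doc_embeds = np.stack([np.ones(8), 2 * np.ones(8)])  # docs "0" and "1"
queries = np.stack([np.ones(8), -np.ones(8)])        # the two test queries
scores = queries @ doc_embeds.T                      # inner products, shape (2, 2)
print(scores.argmax(axis=1))                         # [1 0] -- matches doc_ids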
"""simple docstring""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): try: import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise if not is_sharded: UpperCAmelCase_ : List[str] = os.path.abspath(__lowerCamelCase ) logger.info(f"""Loading PyTorch weights from {pt_path}""" ) UpperCAmelCase_ : List[str] = torch.load(__lowerCamelCase, map_location="cpu" ) logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" ) UpperCAmelCase_ : Optional[int] = convert_pytorch_state_dict_to_flax(__lowerCamelCase, __lowerCamelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files UpperCAmelCase_ : Optional[int] = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase, __lowerCamelCase ) return flax_state_dict def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ): def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool: return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm UpperCAmelCase_ : int = pt_tuple_key[:-1] + ("scale",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ("mean",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var UpperCAmelCase_ : Optional[Any] = pt_tuple_key[:-1] + ("var",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ): return renamed_pt_tuple_key, pt_tensor # embedding UpperCAmelCase_ : int = pt_tuple_key[:-1] + ("embedding",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ): UpperCAmelCase_ : Any = pt_tensor.transpose(2, 3, 1, 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ): UpperCAmelCase_ : int = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCAmelCase_ : List[str] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 UpperCAmelCase_ : int = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): UpperCAmelCase_ : Optional[Any] = pt_tuple_key[-2] + "_g" elif pt_tuple_key[-3::2] == ("parametrizations", 
"original1"): UpperCAmelCase_ : str = pt_tuple_key[-2] + "_v" if name is not None: UpperCAmelCase_ : Dict = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __a ( __lowerCamelCase, __lowerCamelCase ): # convert pytorch tensor to numpy UpperCAmelCase_ : List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCAmelCase_ : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: UpperCAmelCase_ : List[str] = flax_model.params["params"] else: UpperCAmelCase_ : List[str] = flax_model.params UpperCAmelCase_ : Tuple = flatten_dict(__lowerCamelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCAmelCase_ : Optional[int] = flatten_dict(flax_model.params["batch_stats"] ) random_flax_state_dict.update(__lowerCamelCase ) UpperCAmelCase_ : Any = {} UpperCAmelCase_ : Optional[int] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) UpperCAmelCase_ : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCAmelCase_ : int = tuple(pt_key.split("." ) ) # remove base model prefix if necessary UpperCAmelCase_ : List[Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCAmelCase_ : int = pt_tuple_key[1:] # Correctly rename weight parameters UpperCAmelCase_ , UpperCAmelCase_ : List[str] = rename_key_and_reshape_tensor( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # add model prefix if necessary UpperCAmelCase_ : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCAmelCase_ : Dict = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: UpperCAmelCase_ : Tuple = jnp.asarray(__lowerCamelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__lowerCamelCase, __lowerCamelCase ) continue # also add unexpected weight so that warning is thrown UpperCAmelCase_ : Union[str, Any] = jnp.asarray(__lowerCamelCase ) else: # also add unexpected weight so that warning is thrown UpperCAmelCase_ : Dict = jnp.asarray(__lowerCamelCase ) return unflatten_dict(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): import torch # Load the index UpperCAmelCase_ : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils UpperCAmelCase_ : Tuple = torch.load(__lowerCamelCase ) UpperCAmelCase_ : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCAmelCase_ : Dict = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCAmelCase_ : Optional[Any] = flax_model.params["params"] UpperCAmelCase_ : Union[str, Any] = flatten_dict(__lowerCamelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) ) else: UpperCAmelCase_ : str = flax_model.params UpperCAmelCase_ : Any = flatten_dict(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) UpperCAmelCase_ : str = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCAmelCase_ : List[Any] = tuple(pt_key.split("." ) ) # remove base model prefix if necessary UpperCAmelCase_ : Dict = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCAmelCase_ : List[Any] = pt_tuple_key[1:] # Correctly rename weight parameters UpperCAmelCase_ , UpperCAmelCase_ : str = rename_key_and_reshape_tensor( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # add model prefix if necessary UpperCAmelCase_ : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCAmelCase_ : Tuple = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: UpperCAmelCase_ : List[Any] = jnp.asarray(__lowerCamelCase ) continue if "var" in flax_key[-1]: UpperCAmelCase_ : List[str] = jnp.asarray(__lowerCamelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__lowerCamelCase, __lowerCamelCase ) continue # also add unexpected weight so that warning is thrown UpperCAmelCase_ : Optional[int] = jnp.asarray(__lowerCamelCase ) else: # also add unexpected weight so that warning is thrown UpperCAmelCase_ : List[str] = jnp.asarray(__lowerCamelCase ) return unflatten_dict(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = os.path.abspath(__lowerCamelCase ) logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" ) # import correct flax class UpperCAmelCase_ : List[str] = getattr(__lowerCamelCase, "Flax" + model.__class__.__name__ ) # load flax weight dict with open(__lowerCamelCase, "rb" ) as state_f: try: UpperCAmelCase_ : List[str] = from_bytes(__lowerCamelCase, state_f.read() ) except UnpicklingError: raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): try: import torch # noqa: F401 except ImportError: logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights UpperCAmelCase_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda __lowerCamelCase : x.dtype == jnp.bfloataa, __lowerCamelCase ) ).values() if any(__lowerCamelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) UpperCAmelCase_ : Any = jax.tree_util.tree_map( lambda __lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, __lowerCamelCase ) UpperCAmelCase_ : Tuple = flatten_dict(__lowerCamelCase ) UpperCAmelCase_ : List[Any] = pt_model.state_dict() UpperCAmelCase_ : Tuple = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()} ) UpperCAmelCase_ : int = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split("." 
)[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): UpperCAmelCase_ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix UpperCAmelCase_ : Optional[Any] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: UpperCAmelCase_ : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: UpperCAmelCase_ : List[str] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict: # conv layer UpperCAmelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",) UpperCAmelCase_ : str = jnp.transpose(__lowerCamelCase, (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict: # linear layer UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("weight",) UpperCAmelCase_ : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCAmelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: UpperCAmelCase_ : Tuple = flax_key_tuple[:-1] + ("running_mean",) elif "var" in flax_key_tuple[-1]: UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("running_var",) if "batch_stats" in flax_state: UpperCAmelCase_ : Optional[Any] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: UpperCAmelCase_ : Any = ".".join(__lowerCamelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. UpperCAmelCase_ : Optional[int] = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: UpperCAmelCase_ : Union[str, Any] = key.split("." ) UpperCAmelCase_ : int = None if key_components[-3::2] == ["parametrizations", "original0"]: UpperCAmelCase_ : List[str] = key_components[-2] + "_g" elif key_components[-3::2] == ["parametrizations", "original1"]: UpperCAmelCase_ : str = key_components[-2] + "_v" if name is not None: UpperCAmelCase_ : Optional[int] = key_components[:-3] + [name] UpperCAmelCase_ : Tuple = ".".join(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = key if flax_key in special_pt_names: UpperCAmelCase_ : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict UpperCAmelCase_ : Any = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase, np.ndarray ) else flax_tensor UpperCAmelCase_ : str = torch.from_numpy(__lowerCamelCase ) # remove from missing keys missing_keys.remove(__lowerCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(__lowerCamelCase ) pt_model.load_state_dict(__lowerCamelCase ) # re-transform missing_keys to list UpperCAmelCase_ : List[str] = list(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) else: logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" ) if len(__lowerCamelCase ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" " use it for predictions and inference." ) else: logger.warning( f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n""" "If your task is similar to the task the model of the checkpoint was trained on, " f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" ) return pt_model
23
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Tuple = val def __a ( ): UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = DeiTConfig() # all deit models have fine-tuned heads UpperCAmelCase_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Tuple = 1000 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : Any = int(deit_name[-6:-4] ) UpperCAmelCase_ : Dict = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCAmelCase_ : Any = 192 UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : Union[str, Any] = 12 UpperCAmelCase_ : int = 3 elif deit_name[9:].startswith("small" ): UpperCAmelCase_ : List[str] = 384 UpperCAmelCase_ : List[str] = 1536 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : Any = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCAmelCase_ : int = 1024 UpperCAmelCase_ : List[Any] = 4096 UpperCAmelCase_ : Optional[int] = 24 UpperCAmelCase_ : int = 16 # load original model from timm UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Optional[Any] = timm_model.state_dict() UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCAmelCase_ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size ) UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : int = encoding["pixel_values"] UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
23
1
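For conv layers, both conversion directions above come down to a fixed axis permutation between PyTorch's (out, in, H, W) kernel layout and Flax's (H, W, in, out). A round-trip check of the two transposes the code uses, (2, 3, 1, 0) and its inverse (3, 2, 0, 1):

import numpy as np

pt_kernel = np.random.rand(16, 3, 5, 5)        # PyTorch conv weight: (out, in, H, W)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # Flax conv kernel:    (H, W, in, out)
assert flax_kernel.shape == (5, 5, 3, 16)
restored = flax_kernel.transpose(3, 2, 0, 1)   # inverse permutation, Flax -> PyTorch
assert np.array_equal(pt_kernel, restored)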
"""simple docstring""" def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = [0] * len(__lowerCamelCase ) for i in range(1, len(__lowerCamelCase ) ): # use last results for better performance - dynamic programming UpperCAmelCase_ : str = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: UpperCAmelCase_ : Optional[int] = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 UpperCAmelCase_ : Dict = j return prefix_result def __a ( __lowerCamelCase ): return max(prefix_function(__lowerCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ ) UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )] UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ ) UpperCAmelCase_ : Tuple = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Tuple = jax.device_count() UpperCAmelCase_ : Optional[int] = num_samples * [prompt] UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowercase_ ) == num_samples def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ ) UpperCAmelCase_ : Optional[int] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Union[str, Any] = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[str] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) 
UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ ) UpperCAmelCase_ : Any = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : str = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Dict = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) UpperCAmelCase_ : List[Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Optional[int] = 50 UpperCAmelCase_ : Optional[int] = jax.device_count() UpperCAmelCase_ : str = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , ) UpperCAmelCase_ : List[Any] = scheduler.create_state() UpperCAmelCase_ : int = scheduler_state UpperCAmelCase_ : Union[str, Any] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm 
lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : int = 50 UpperCAmelCase_ : str = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = shard(lowercase_ ) UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[Any] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , ) UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1] # With memory efficient attention UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , ) UpperCAmelCase_ : str = replicate(lowercase_ ) UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ ) UpperCAmelCase_ : Optional[int] = shard(lowercase_ ) UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
23
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ): """simple docstring""" warnings.warn( "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use SegformerImageProcessor instead." , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
23
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
23
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[str] = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : List[Any] = use_labels UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : List[str] = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Optional[Any] = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : Union[str, Any] = scope def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Any = None if self.use_input_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Any = None UpperCAmelCase_ : int = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Dict = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowercase_ 
, ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = FalconModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = model(lowercase_ , attention_mask=lowercase_ ) UpperCAmelCase_ : str = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Optional[Any] = FalconModel(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : str = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) UpperCAmelCase_ : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) UpperCAmelCase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : List[str] = FalconForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : List[Any] = FalconForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() # first forward pass UpperCAmelCase_ : Tuple = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) UpperCAmelCase_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ : List[str] = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0] UpperCAmelCase_ : str = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice 
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = config_and_inputs UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = (FalconForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : int = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : int = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = FalconModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , *UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: UpperCAmelCase_ : Union[str, Any] = alibi self.model_tester.create_and_check_model(lowercase_ , *lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = 3 UpperCAmelCase_ : int = input_dict["input_ids"] UpperCAmelCase_ : Any = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : int = FalconForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = 3 UpperCAmelCase_ : Tuple = "single_label_classification" UpperCAmelCase_ : Dict = input_dict["input_ids"] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = FalconForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = input_dict["input_ids"] UpperCAmelCase_ : Any = FalconForCausalLM(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = model(lowercase_ , use_cache=lowercase_ ) UpperCAmelCase_ : List[Any] = input_ids.shape[0] UpperCAmelCase_ : Optional[Any] = model._convert_to_rw_cache(result.past_key_values ) UpperCAmelCase_ : Tuple = model._convert_cache_to_standard_format(lowercase_ , lowercase_ ) for layer in range(len(lowercase_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Any = "multi_label_classification" UpperCAmelCase_ : Optional[Any] = input_dict["input_ids"] UpperCAmelCase_ : Union[str, Any] = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase_ : Tuple = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : str = FalconForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(lowercase_ , "use_cache" ): return UpperCAmelCase_ : Optional[int] = model_class(lowercase_ ).to(lowercase_ ) if "use_cache" not in inputs: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : List[Any] = model(**lowercase_ ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return UpperCAmelCase_ : Union[str, Any] = ( getattr(lowercase_ , "decoder_layers" , lowercase_ ) or getattr(lowercase_ , "num_decoder_layers" , lowercase_ ) or config.num_hidden_layers ) UpperCAmelCase_ : Union[str, Any] = getattr(lowercase_ , "num_kv_heads" , config.num_attention_heads ) UpperCAmelCase_ : Tuple = getattr(lowercase_ , "d_model" , config.hidden_size ) UpperCAmelCase_ : int = embed_dim // num_attention_heads UpperCAmelCase_ : Any = outputs["past_key_values"] self.assertEqual(len(lowercase_ ) , lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : int = inputs["input_ids"].shape for i in range(lowercase_ ): if config.new_decoder_architecture: UpperCAmelCase_ : Tuple = config.num_attention_heads elif config.multi_query: UpperCAmelCase_ : Dict = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class A_ (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) UpperCAmelCase_ : Any = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(lowercase_ ) UpperCAmelCase_ : Optional[int] = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowercase_ ) UpperCAmelCase_ : List[str] = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) UpperCAmelCase_ : str = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=19 ) UpperCAmelCase_ : str = tokenizer.batch_decode(lowercase_ )[0] self.assertEqual(lowercase_ , lowercase_ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ ) UpperCAmelCase_ : Any = FalconForCausalLM.from_pretrained(lowercase_ ) model.eval() model.to(lowercase_ ) UpperCAmelCase_ : int = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowercase_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4 ) model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4 ) model.generate(**lowercase_ , num_beams=2 , max_new_tokens=4 ) @slow def UpperCamelCase__ ( self ): """simple docstring""" # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ ) UpperCAmelCase_ : str = FalconForCausalLM.from_pretrained(lowercase_ ) model.eval() model.to(device=lowercase_ ) UpperCAmelCase_ : Dict = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowercase_ ) # Test results are the same with and without cache UpperCAmelCase_ : str = 
model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
23
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in 
zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
23
1
"""simple docstring""" import cmath import math def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = math.radians(__lowerCamelCase ) UpperCAmelCase_ : Tuple = math.radians(__lowerCamelCase ) # Convert voltage and current to rectangular form UpperCAmelCase_ : Optional[Any] = cmath.rect(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Optional[int] = cmath.rect(__lowerCamelCase, __lowerCamelCase ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _a = object() # For specifying empty leaf dict `{}` _a = object() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ): UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )] if matches and all(__lowerCamelCase ): return True return False def __a ( __lowerCamelCase ): def replace(__lowerCamelCase, __lowerCamelCase ): for rule, replacement in rules: if _match(__lowerCamelCase, __lowerCamelCase ): return replacement return val return replace def __a ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )), (("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )), (("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = _get_partition_rules() UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase ) UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )} UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCamelCase ) )
23
1
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase = False ): if not isinstance(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = f"""Expected string as input, found {type(__lowerCamelCase )}""" raise ValueError(__lowerCamelCase ) if not isinstance(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = f"""Expected boolean as use_pascal parameter, found {type(__lowerCamelCase )}""" raise ValueError(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = input_str.split("_" ) UpperCAmelCase_ : Any = 0 if use_pascal else 1 UpperCAmelCase_ : Optional[Any] = words[start_index:] UpperCAmelCase_ : Union[str, Any] = [word[0].upper() + word[1:] for word in words_to_capitalize] UpperCAmelCase_ : Union[str, Any] = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
23
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
23
1
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def __a ( __lowerCamelCase, __lowerCamelCase = True, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = False, __lowerCamelCase = 100, __lowerCamelCase = 0.01, __lowerCamelCase = 1, ): UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Any = search_prob UpperCAmelCase_ : Any = start_temperate UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : Optional[int] = None while not search_end: UpperCAmelCase_ : int = current_state.score() if best_state is None or current_score > best_state.score(): UpperCAmelCase_ : Union[str, Any] = current_state scores.append(__lowerCamelCase ) iterations += 1 UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Optional[int] = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to UpperCAmelCase_ : List[str] = random.randint(0, len(__lowerCamelCase ) - 1 ) # picking a random neighbor UpperCAmelCase_ : List[Any] = neighbors.pop(__lowerCamelCase ) UpperCAmelCase_ : Dict = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: UpperCAmelCase_ : Optional[Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution UpperCAmelCase_ : Optional[Any] = picked_neighbor else: UpperCAmelCase_ : str = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability UpperCAmelCase_ : str = picked_neighbor UpperCAmelCase_ : List[str] = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor UpperCAmelCase_ : Optional[int] = True else: UpperCAmelCase_ : List[str] = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__lowerCamelCase ), __lowerCamelCase ) plt.xlabel("Iterations" ) plt.ylabel("Function values" ) plt.show() return best_state if __name__ == "__main__": def __a ( __lowerCamelCase, __lowerCamelCase ): return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _a = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _a = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) _a = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _a = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def __a ( __lowerCamelCase, __lowerCamelCase ): return (3 * x**2) - (6 * y) _a = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _a = simulated_annealing(prob, find_max=False, visualization=True) print( 'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' f"""{local_min.score()}""" ) _a = SearchProblem(x=3, y=4, step_size=1, 
function_to_optimize=test_fa) _a = simulated_annealing(prob, find_max=True, visualization=True) print( 'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' f"""{local_min.score()}""" )
23
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
23
1
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , **lowercase_ ): """simple docstring""" requires_backends(self , ["bs4"] ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCAmelCase_ : Optional[int] = parent.find_all(child.name , recursive=lowercase_ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(lowercase_ ) else next(i for i, s in enumerate(lowercase_ , 1 ) if s is child ) ) UpperCAmelCase_ : Union[str, Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = BeautifulSoup(lowercase_ , "html.parser" ) UpperCAmelCase_ : int = [] UpperCAmelCase_ : Any = [] UpperCAmelCase_ : List[Any] = [] for element in html_code.descendants: if type(lowercase_ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCAmelCase_ : List[str] = html.unescape(lowercase_ ).strip() if not text_in_this_tag: continue all_doc_strings.append(lowercase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.xpath_soup(lowercase_ ) stringaxtag_seq.append(lowercase_ ) stringaxsubs_seq.append(lowercase_ ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError("Number of doc strings and xtags does not correspond" ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError("Number of doc strings and xsubs does not correspond" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = "" for tagname, subs in zip(lowercase_ , lowercase_ ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = False # Check that strings has a valid type if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[str] = True elif isinstance(lowercase_ , (list, tuple) ): if len(lowercase_ ) == 0 or isinstance(html_strings[0] , lowercase_ ): UpperCAmelCase_ : List[str] = True if not valid_strings: raise ValueError( "HTML strings must of type `str`, `List[str]` (batch of examples), " F"""but is of type {type(lowercase_ )}.""" ) UpperCAmelCase_ : str = bool(isinstance(lowercase_ , (list, tuple) ) and (isinstance(html_strings[0] , lowercase_ )) ) if not is_batched: UpperCAmelCase_ : Tuple = [html_strings] # Get nodes + xpaths UpperCAmelCase_ : int = [] UpperCAmelCase_ : List[Any] = [] for html_string in html_strings: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.get_three_from_single(lowercase_ ) nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = [] for node, tag_list, sub_list in zip(lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase_ : Any = self.construct_xpath(lowercase_ , lowercase_ ) xpath_strings.append(lowercase_ ) xpaths.append(lowercase_ ) # return as Dict UpperCAmelCase_ : Optional[int] = {"nodes": nodes, "xpaths": xpaths} UpperCAmelCase_ : Union[str, Any] 
= BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) return encoded_inputs
23
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
23
1
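# A minimal usage sketch for the HTML feature extractor above. It assumes the
# class is importable as `transformers.MarkupLMFeatureExtractor` (the upstream
# home of this snippet); the printed values are what the xpath construction
# above yields for this input.
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]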
"""simple docstring""" def __a ( __lowerCamelCase ): assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0""" raise ValueError(__lowerCamelCase ) else: UpperCAmelCase_ : List[str] = sylvester(number - 1 ) UpperCAmelCase_ : List[str] = num - 1 UpperCAmelCase_ : List[str] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
23
1
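# Sylvester's sequence obeys a(n) = a(n-1)**2 - a(n-1) + 1, which is exactly
# what `lower * upper + 1` computes in the recursive version above. A
# self-contained check of the first terms against that recurrence:
def sylvester_terms(count: int) -> list[int]:
    terms = [2]
    while len(terms) < count:
        prev = terms[-1]
        terms.append(prev * prev - prev + 1)
    return terms

print(sylvester_terms(5))  # [2, 3, 7, 43, 1807]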
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file UpperCAmelCase_ : Tuple = TapasConfig.from_json_file(__lowerCamelCase ) # set absolute/relative position embeddings parameter UpperCAmelCase_ : Union[str, Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": UpperCAmelCase_ : str = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "WTQ": # run_task_main.py hparams UpperCAmelCase_ : List[Any] = 4 UpperCAmelCase_ : Dict = True # hparam_utils.py hparams UpperCAmelCase_ : Optional[int] = 0.66_4694 UpperCAmelCase_ : List[str] = 0.20_7951 UpperCAmelCase_ : Union[str, Any] = 0.12_1194 UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : str = 0.035_2513 UpperCAmelCase_ : List[Any] = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams UpperCAmelCase_ : Dict = 4 UpperCAmelCase_ : Union[str, Any] = False # hparam_utils.py hparams UpperCAmelCase_ : Optional[int] = 36.4519 UpperCAmelCase_ : List[Any] = 0.90_3421 UpperCAmelCase_ : Union[str, Any] = 222.088 UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Any = True UpperCAmelCase_ : Dict = True UpperCAmelCase_ : str = 0.76_3141 UpperCAmelCase_ : Dict = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "TABFACT": UpperCAmelCase_ : List[str] = TapasForSequenceClassification(config=__lowerCamelCase ) elif task == "MLM": UpperCAmelCase_ : Optional[int] = TapasForMaskedLM(config=__lowerCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": UpperCAmelCase_ : Any = TapasModel(config=__lowerCamelCase ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__lowerCamelCase ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) UpperCAmelCase_ : Dict = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 ) tokenizer.save_pretrained(__lowerCamelCase ) print("Used relative position embeddings:", model.config.reset_position_index_per_cell ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' 
) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
23
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
1
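# The deebert test above drives a script's `main()` in-process by patching
# `sys.argv` so the script's own argparse sees the flags. The pattern in
# isolation (script and flag names here are placeholders):
import sys
from unittest.mock import patch

testargs = ["run_glue_deebert.py", "--model_type", "roberta"]
with patch.object(sys, "argv", testargs):
    print(sys.argv)  # ['run_glue_deebert.py', '--model_type', 'roberta']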
"""simple docstring""" def __a ( __lowerCamelCase ): # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError("The given input must be positive" ) # get the generated string sequence UpperCAmelCase_ : Dict = gray_code_sequence_string(__lowerCamelCase ) # # convert them to integers for i in range(len(__lowerCamelCase ) ): UpperCAmelCase_ : str = int(sequence[i], 2 ) return sequence def __a ( __lowerCamelCase ): # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] UpperCAmelCase_ : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits UpperCAmelCase_ : int = gray_code_sequence_string(bit_count - 1 ) UpperCAmelCase_ : Dict = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): UpperCAmelCase_ : List[Any] = "0" + smaller_sequence[i] sequence.append(__lowerCamelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): UpperCAmelCase_ : Union[str, Any] = "1" + smaller_sequence[i] sequence.append(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
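# Adjacent Gray codes differ in exactly one bit. A quick property check
# against the generator restored above (assumes `gray_code` from that
# snippet is in scope):
seq = gray_code(3)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(seq, seq[1:]):
    assert bin(a ^ b).count("1") == 1
print([format(x, "03b") for x in seq])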
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _a = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : str = latents.to(lowercase_ ) UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : str = self._execution_device UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 ) UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", 
"learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5 UpperCAmelCase_ : int = image.clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
1
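# The Kandinsky decoder above snaps the requested height/width up to the
# nearest size its VQ latent grid supports. The same arithmetic, standalone:
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(768, 768))  # (96, 96)
print(downscale_height_and_width(700, 700))  # (88, 88) -- rounded up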
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class A_ (lowercase__ ): '''simple docstring''' def __init__( self ): """simple docstring""" UpperCAmelCase_ : Dict = [] def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_init_end" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_train_begin" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_train_end" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_epoch_begin" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_epoch_end" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_step_begin" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_step_end" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_evaluate" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_predict" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_save" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_log" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" self.events.append("on_prediction_step" ) @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = tempfile.mkdtemp() def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.output_dir ) def UpperCamelCase__ ( self , lowercase_=0 , lowercase_=0 , lowercase_=64 , lowercase_=64 , lowercase_=None , lowercase_=False , **lowercase_ ): """simple docstring""" # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
UpperCAmelCase_ : Dict = RegressionDataset(length=lowercase_ ) UpperCAmelCase_ : List[str] = RegressionDataset(length=lowercase_ ) UpperCAmelCase_ : Optional[int] = RegressionModelConfig(a=lowercase_ , b=lowercase_ ) UpperCAmelCase_ : Optional[Any] = RegressionPreTrainedModel(lowercase_ ) UpperCAmelCase_ : Dict = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ ) return Trainer( lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) # Order doesn't matter UpperCAmelCase_ : Optional[int] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) UpperCAmelCase_ : Union[str, Any] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase_ , lowercase_ ): if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , lowercase_ ) elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , cba.__class__ ) elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(cba.__class__ , lowercase_ ) else: self.assertEqual(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = ["on_init_end", "on_train_begin"] UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : str = len(trainer.get_eval_dataloader() ) UpperCAmelCase_ : str = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(lowercase_ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.get_trainer() UpperCAmelCase_ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # Callbacks passed at init are added to the default callbacks UpperCAmelCase_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback UpperCAmelCase_ : Any = self.get_trainer(disable_tqdm=lowercase_ ) UpperCAmelCase_ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] UpperCAmelCase_ : List[Any] = self.get_trainer() # We can add, pop, or remove by 
class name trainer.remove_callback(lowercase_ ) expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) UpperCAmelCase_ : Dict = self.get_trainer() UpperCAmelCase_ : List[Any] = trainer.pop_callback(lowercase_ ) self.assertEqual(cb.__class__ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # We can also add, pop, or remove by instance UpperCAmelCase_ : List[Any] = self.get_trainer() UpperCAmelCase_ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase_ ) expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) UpperCAmelCase_ : Dict = self.get_trainer() UpperCAmelCase_ : str = trainer.callback_handler.callbacks[0] UpperCAmelCase_ : List[str] = trainer.pop_callback(lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=lowercase_ ) UpperCAmelCase_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() UpperCAmelCase_ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # Independent log/save/eval UpperCAmelCase_ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) UpperCAmelCase_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) UpperCAmelCase_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() UpperCAmelCase_ : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) UpperCAmelCase_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() UpperCAmelCase_ : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # A bit of everything UpperCAmelCase_ : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() UpperCAmelCase_ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: UpperCAmelCase_ : int = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(lowercase_ ) in warn_mock.call_args[0][0]
23
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
23
1
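# Building the config above directly; attribute names follow the __init__
# signature shown, and `DetrConfig` is the upstream name for this class.
# `num_attention_heads` resolves to `encoder_attention_heads` via the
# attribute_map declared on the class:
from transformers import DetrConfig

config = DetrConfig(num_queries=50, encoder_layers=4)
print(config.num_queries, config.d_model, config.num_attention_heads)  # 50 256 8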
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
23
"""simple docstring""" _a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _a = [None] * 10_000_000 _a = True _a = False def __a ( __lowerCamelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) ) UpperCAmelCase_ : List[str] = number_chain while number < 1000_0000: UpperCAmelCase_ : List[Any] = number_chain number *= 10 return number_chain def __a ( __lowerCamelCase = 1000_0000 ): for i in range(1, __lowerCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
23
1
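# `next_number` above is just the digit-square sum, computed five digits at a
# time through the precomputed table. A direct check against the naive form:
def digit_square_sum(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))

assert digit_square_sum(44) == 32   # 4**2 + 4**2
assert digit_square_sum(85) == 89   # this chain has reached its 89 endpoint
print(digit_square_sum(9_999_999))  # 7 * 81 = 567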
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class A_ (unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE__ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" ) UpperCAmelCase_ : Optional[int] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ {"sequence": "My name is grouped", "score": 2.1E-0_5, "token": 3_8015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1E-0_5, "token": 2_5506, "token_str": " accuser"}, ] , ) UpperCAmelCase_ : Union[str, Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ { "sequence": "The largest city in France is grouped", "score": 2.1E-0_5, "token": 3_8015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1E-0_5, "token": 2_5506, "token_str": " accuser", }, ] , ) UpperCAmelCase_ : Union[str, Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ {"sequence": "My name is Clara", "score": 2E-0_5, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2E-0_5, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9E-0_5, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" ) UpperCAmelCase_ : Any = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ {"sequence": "My name is Maul", "score": 2.2E-0_5, "token": 3_5676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS"}, ] , ) UpperCAmelCase_ : Any = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ { "sequence": "The largest city in France is Maul", "score": 2.2E-0_5, "token": 3_5676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS"}, ] , ) UpperCAmelCase_ : Optional[int] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ {"sequence": "My name is Patrick", "score": 2.1E-0_5, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2E-0_5, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2E-0_5, "token": 1_3606, "token_str": " Clara"}, ] , ) UpperCAmelCase_ : List[Any] = unmasker("My name is 
<mask> <mask>" , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=6 ) , [ [ { "score": 2.2E-0_5, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2E-0_5, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" ) # convert model to fp16 pipe.model.half() UpperCAmelCase_ : Optional[int] = pipe("Paris is the [MASK] of France." ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowercase_ , lowercase_ ) @slow @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" ) self.run_large_test(lowercase_ ) @slow @require_tf def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" ) self.run_large_test(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"}, ] , ) UpperCAmelCase_ : List[Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(lowercase_ ) , [ { "sequence": "The largest city in France is Paris", "score": 0.2_51, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.2_14, "token": 1_2790, "token_str": " Lyon", }, ] , ) UpperCAmelCase_ : str = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(lowercase_ ) , [ {"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.0_00, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = None self.run_pipeline_test(lowercase_ , [] ) @require_tf def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Dict = None self.run_pipeline_test(lowercase_ , [] ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" ) UpperCAmelCase_ : Union[str, Any] = FillMaskPipeline(model=lowercase_ , 
tokenizer=lowercase_ ) UpperCAmelCase_ : Any = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = fill_masker.tokenizer UpperCAmelCase_ : Optional[Any] = fill_masker.model UpperCAmelCase_ : Union[str, Any] = fill_masker( F"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( lowercase_ , [ {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, ] , ) UpperCAmelCase_ : Union[str, Any] = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( lowercase_ , [ {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, ] , ) UpperCAmelCase_ : Union[str, Any] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( lowercase_ , [ [ {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, ], [ {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )}, ], ] , ) with self.assertRaises(lowercase_ ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowercase_ ): fill_masker("This is" ) self.run_test_top_k(lowercase_ , lowercase_ ) self.run_test_targets(lowercase_ , lowercase_ ) self.run_test_top_k_targets(lowercase_ , lowercase_ ) self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_ ) self.fill_mask_with_multiple_masks(lowercase_ , lowercase_ ) def 
UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
        """simple docstring"""
        UpperCAmelCase_ : Dict = tokenizer.get_vocab()
        UpperCAmelCase_ : Optional[Any] = sorted(vocab.keys() )[:2]
        # Pipeline argument
        UpperCAmelCase_ : List[str] = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_ )
        UpperCAmelCase_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
            ] , )
        UpperCAmelCase_ : List[Any] = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_ )
        UpperCAmelCase_ : Any = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_ ) )
        # Call argument
        UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ )
        UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase_ )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
            ] , )
        UpperCAmelCase_ : Tuple = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_ )
        UpperCAmelCase_ : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_ ) )
        # Score equivalence
        UpperCAmelCase_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase_ )
        UpperCAmelCase_ : Tuple = [top_mask["token_str"] for top_mask in outputs]
        UpperCAmelCase_ : Dict = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_ ) == set(lowercase_ ):
            UpperCAmelCase_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase_ )
            UpperCAmelCase_ : Union[str, Any] = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(lowercase_ ) , nested_simplify(lowercase_ ) )
        # Raises with invalid
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase_ : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(lowercase_ ):
                UpperCAmelCase_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[""] )
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets="" )

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2 )
        UpperCAmelCase_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
            ] , )
        UpperCAmelCase_ : Dict = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ )
        UpperCAmelCase_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
            ] , )
        self.assertEqual(nested_simplify(lowercase_ ) , nested_simplify(lowercase_ ) )

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = tokenizer.get_vocab()
        UpperCAmelCase_ : Any = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ )
        # top_k=2, ntargets=3
        UpperCAmelCase_ : Union[str, Any] = sorted(vocab.keys() )[:3]
        UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=lowercase_ )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        UpperCAmelCase_ : List[str] = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_ : x["score"] , reverse=lowercase_ )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_ ).issubset(lowercase_ ):
            UpperCAmelCase_ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=lowercase_ )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(lowercase_ ) , nested_simplify(lowercase_ ) )

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ )
        UpperCAmelCase_ : Optional[Any] = tokenizer.get_vocab()
        # String duplicates + id duplicates
        UpperCAmelCase_ : int = sorted(vocab.keys() )[:3]
        UpperCAmelCase_ : Optional[int] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        UpperCAmelCase_ : Union[str, Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=lowercase_ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(lowercase_ ) , 3 )

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
        """simple docstring"""
        UpperCAmelCase_ : Dict = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ )
        UpperCAmelCase_ : Union[str, Any] = fill_masker(
            F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                ],
                [
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                ],
                [
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                    {"sequence": ANY(lowercase_ ), "score": ANY(lowercase_ ), "token": ANY(lowercase_ ), "token_str": ANY(lowercase_ )},
                ],
            ] , )
23
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Return True if there is node that has not iterated. UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase ) UpperCAmelCase_ : Any = [] queue.append(__lowerCamelCase ) UpperCAmelCase_ : Tuple = True while queue: UpperCAmelCase_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) UpperCAmelCase_ : Any = True UpperCAmelCase_ : Union[str, Any] = u return visited[t] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # This array is filled by BFS and to store path UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase )) UpperCAmelCase_ : Any = 0 while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = float("Inf" ) UpperCAmelCase_ : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] ) UpperCAmelCase_ : Dict = parent[s] max_flow += path_flow UpperCAmelCase_ : Optional[Any] = sink while v != source: UpperCAmelCase_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase_ : Optional[int] = parent[v] return max_flow _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _a , _a = 0, 5 print(ford_fulkerson(graph, source, sink))
23
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A_ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Dict = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.dummy_uncond_unet UpperCAmelCase_ : List[Any] = ScoreSdeVeScheduler() UpperCAmelCase_ : Any = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ ) sde_ve.to(lowercase_ ) sde_ve.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_ ).images UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : Any = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_ , return_dict=lowercase_ )[ 0 ] UpperCAmelCase_ : str = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "google/ncsnpp-church-256" UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : int = ScoreSdeVeScheduler.from_pretrained(lowercase_ ) UpperCAmelCase_ : List[str] = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ ) sde_ve.to(lowercase_ ) sde_ve.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Any = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=lowercase_ ).images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase_ : str = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
23
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
1
"""simple docstring""" def __a ( __lowerCamelCase ): return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') _a = int(input('Enter number: ').strip()) print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
23
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , 
lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
23
1
"""simple docstring""" from __future__ import annotations def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if len(__lowerCamelCase ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(__lowerCamelCase ) or left < -len(__lowerCamelCase ) or right >= len(__lowerCamelCase ) or right < -len(__lowerCamelCase ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] UpperCAmelCase_ : int = (left + right) >> 1 # the middle UpperCAmelCase_ : Union[str, Any] = find_max(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # find max in range[left, mid] UpperCAmelCase_ : Any = find_max(__lowerCamelCase, mid + 1, __lowerCamelCase ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
23
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 ) UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ ) try: pickle.loads(pickle.dumps(lowercase_ ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
23
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ): """simple docstring""" warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead." , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
23
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """ctrl""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : List[str] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : List[str] = n_embd UpperCAmelCase_ : Dict = n_layer UpperCAmelCase_ : Optional[int] = n_head UpperCAmelCase_ : List[str] = dff UpperCAmelCase_ : Tuple = resid_pdrop UpperCAmelCase_ : Optional[Any] = embd_pdrop UpperCAmelCase_ : str = layer_norm_epsilon UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : List[str] = use_cache super().__init__(**lowercase_ )
23
1
"""simple docstring""" def __a ( __lowerCamelCase ): if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) UpperCAmelCase_ : Tuple = sum(__lowerCamelCase ) / len(__lowerCamelCase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" def __a ( __lowerCamelCase ): assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0""" raise ValueError(__lowerCamelCase ) else: UpperCAmelCase_ : List[str] = sylvester(number - 1 ) UpperCAmelCase_ : List[str] = num - 1 UpperCAmelCase_ : List[str] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def __a ( __lowerCamelCase="ro", __lowerCamelCase="en", __lowerCamelCase="wmt16", __lowerCamelCase=None ): try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets" ) UpperCAmelCase_ : List[Any] = f"""{src_lang}-{tgt_lang}""" print(f"""Converting {dataset}-{pair}""" ) UpperCAmelCase_ : str = datasets.load_dataset(__lowerCamelCase, __lowerCamelCase ) if save_dir is None: UpperCAmelCase_ : Dict = f"""{dataset}-{pair}""" UpperCAmelCase_ : Dict = Path(__lowerCamelCase ) save_dir.mkdir(exist_ok=__lowerCamelCase ) for split in ds.keys(): print(f"""Splitting {split} with {ds[split].num_rows} records""" ) # to save to val.source, val.target like summary datasets UpperCAmelCase_ : Tuple = "val" if split == "validation" else split UpperCAmelCase_ : List[str] = save_dir.joinpath(f"""{fn}.source""" ) UpperCAmelCase_ : List[Any] = save_dir.joinpath(f"""{fn}.target""" ) UpperCAmelCase_ : int = src_path.open("w+" ) UpperCAmelCase_ : Union[str, Any] = tgt_path.open("w+" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): UpperCAmelCase_ : int = x["translation"] src_fp.write(ex[src_lang] + "\n" ) tgt_fp.write(ex[tgt_lang] + "\n" ) print(f"""Saved {dataset} dataset to {save_dir}""" ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
23
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ): """simple docstring""" if str(lowercase_ ).startswith("mps" ): UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) UpperCAmelCase_ : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCamelCase__ ( self ): """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
23
1
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example _a = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example _a = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = [] for i in range(len(__lowerCamelCase ) ): UpperCAmelCase_ : Tuple = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours UpperCAmelCase_ : int = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(__lowerCamelCase ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(__lowerCamelCase ) - 1: neighbour_count += cells[i + 1][j] if i < len(__lowerCamelCase ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. UpperCAmelCase_ : Optional[Any] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(__lowerCamelCase ) return next_generation def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : str = [] for _ in range(__lowerCamelCase ): # Create output image UpperCAmelCase_ : str = Image.new("RGB", (len(cells[0] ), len(__lowerCamelCase )) ) UpperCAmelCase_ : str = img.load() # Save cells to image for x in range(len(__lowerCamelCase ) ): for y in range(len(cells[0] ) ): UpperCAmelCase_ : Union[str, Any] = 255 - cells[y][x] * 255 UpperCAmelCase_ : List[Any] = (colour, colour, colour) # Save image images.append(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = new_generation(__lowerCamelCase ) return images if __name__ == "__main__": _a = generate_images(GLIDER, 16) images[0].save('out.gif', save_all=True, append_images=images[1:])
23
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small" UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase_ : List[str] = "en_speaker_1" UpperCAmelCase_ : Tuple = "This is a test string" UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json" UpperCAmelCase_ : Any = "speaker_embeddings" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCAmelCase_ : int = 35 UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : List[Any] = 8 UpperCAmelCase_ : Optional[Any] = { "semantic_prompt": np.ones(lowercase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" ) np.savez(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ ) UpperCAmelCase_ : List[str] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : 
Optional[Any] = BarkProcessor(tokenizer=lowercase_ ) UpperCAmelCase_ : Tuple = processor(text=self.input_string ) UpperCAmelCase_ : Union[str, Any] = tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
23
1
"""simple docstring""" import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = old_name if "patch_embed" in old_name: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = old_name.split("." ) if layer == "0": UpperCAmelCase_ : Any = old_name.replace("0", "convolution1" ) elif layer == "1": UpperCAmelCase_ : Tuple = old_name.replace("1", "batchnorm_before" ) elif layer == "3": UpperCAmelCase_ : Union[str, Any] = old_name.replace("3", "convolution2" ) else: UpperCAmelCase_ : int = old_name.replace("4", "batchnorm_after" ) if "network" in old_name and re.search(r"\d\.\d", __lowerCamelCase ): UpperCAmelCase_ : List[str] = r"\b\d{2}\b" if bool(re.search(__lowerCamelCase, __lowerCamelCase ) ): UpperCAmelCase_ : Tuple = re.search(r"\d\.\d\d.", __lowerCamelCase ).group() else: UpperCAmelCase_ : Optional[int] = re.search(r"\d\.\d.", __lowerCamelCase ).group() if int(match[0] ) < 6: UpperCAmelCase_ : Any = old_name.replace(__lowerCamelCase, "" ) UpperCAmelCase_ : str = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1] ) UpperCAmelCase_ : str = "intermediate_stages." + trimmed_name else: UpperCAmelCase_ : str = old_name.replace(__lowerCamelCase, "" ) if int(match[2] ) < num_meta4D_last_stage: UpperCAmelCase_ : Optional[Any] = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2] ) else: UpperCAmelCase_ : List[str] = str(int(match[2] ) - num_meta4D_last_stage ) UpperCAmelCase_ : Optional[int] = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index ) if "norm1" in old_name: UpperCAmelCase_ : Optional[Any] = trimmed_name.replace("norm1", "layernorm1" ) elif "norm2" in old_name: UpperCAmelCase_ : Optional[Any] = trimmed_name.replace("norm2", "layernorm2" ) elif "fc1" in old_name: UpperCAmelCase_ : Tuple = trimmed_name.replace("fc1", "linear_in" ) elif "fc2" in old_name: UpperCAmelCase_ : Optional[Any] = trimmed_name.replace("fc2", "linear_out" ) UpperCAmelCase_ : Union[str, Any] = "last_stage." + trimmed_name elif "network" in old_name and re.search(r".\d.", __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = old_name.replace("network", "intermediate_stages" ) if "fc" in new_name: UpperCAmelCase_ : List[str] = new_name.replace("fc", "convolution" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): UpperCAmelCase_ : List[Any] = new_name.replace("norm1", "batchnorm_before" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): UpperCAmelCase_ : Union[str, Any] = new_name.replace("norm2", "batchnorm_after" ) if "proj" in new_name: UpperCAmelCase_ : Optional[int] = new_name.replace("proj", "projection" ) if "dist_head" in new_name: UpperCAmelCase_ : Union[str, Any] = new_name.replace("dist_head", "distillation_classifier" ) elif "head" in new_name: UpperCAmelCase_ : Dict = new_name.replace("head", "classifier" ) elif "patch_embed" in new_name: UpperCAmelCase_ : Union[str, Any] = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": UpperCAmelCase_ : Dict = new_name.replace("norm", "layernorm" ) UpperCAmelCase_ : int = "efficientformer." 
+ new_name else: UpperCAmelCase_ : int = "efficientformer.encoder." + new_name return new_name def __a ( __lowerCamelCase, __lowerCamelCase ): for key in checkpoint.copy().keys(): UpperCAmelCase_ : List[str] = checkpoint.pop(__lowerCamelCase ) UpperCAmelCase_ : Optional[int] = val return checkpoint def __a ( ): UpperCAmelCase_ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return image def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = torch.load(__lowerCamelCase, map_location="cpu" )["model"] UpperCAmelCase_ : int = EfficientFormerConfig.from_json_file(__lowerCamelCase ) UpperCAmelCase_ : List[str] = EfficientFormerForImageClassificationWithTeacher(__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] ) UpperCAmelCase_ : Any = config.depths[-1] - config.num_metaad_blocks + 1 UpperCAmelCase_ : Union[str, Any] = convert_torch_checkpoint(__lowerCamelCase, __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() UpperCAmelCase_ : str = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : List[Any] = 256 UpperCAmelCase_ : List[str] = 224 UpperCAmelCase_ : Optional[int] = EfficientFormerImageProcessor( size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], ) UpperCAmelCase_ : str = processor(images=__lowerCamelCase, return_tensors="pt" ).pixel_values # original processing pipeline UpperCAmelCase_ : Dict = Compose( [ Resize(__lowerCamelCase, interpolation=pillow_resamplings["bicubic"] ), CenterCrop(__lowerCamelCase ), ToTensor(), Normalize(__lowerCamelCase, __lowerCamelCase ), ] ) UpperCAmelCase_ : Optional[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 ) assert torch.allclose(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = outputs.logits UpperCAmelCase_ : str = (1, 1000) if "l1" in model_name: UpperCAmelCase_ : Tuple = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10], __lowerCamelCase, atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: UpperCAmelCase_ : Any = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10], __lowerCamelCase, atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: UpperCAmelCase_ : Dict = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(__lowerCamelCase ) print(f"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print("Pushing model to the hub..." 
) model.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="Add model", use_temp_dir=__lowerCamelCase, ) processor.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="Add image processor", use_temp_dir=__lowerCamelCase, ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) _a = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
23
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : Tuple = val def __a ( ): UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = DeiTConfig() # all deit models have fine-tuned heads UpperCAmelCase_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Tuple = 1000 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Any = idalabel UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : Any = int(deit_name[-6:-4] ) UpperCAmelCase_ : Dict = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCAmelCase_ : Any = 192 UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : Union[str, Any] = 12 UpperCAmelCase_ : int = 3 elif deit_name[9:].startswith("small" ): UpperCAmelCase_ : List[str] = 384 UpperCAmelCase_ : List[str] = 1536 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : Any = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCAmelCase_ : int = 1024 UpperCAmelCase_ : List[Any] = 4096 UpperCAmelCase_ : Optional[int] = 24 UpperCAmelCase_ : int = 16 # load original model from timm UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Optional[Any] = timm_model.state_dict() UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # load HuggingFace model UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCAmelCase_ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size ) UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" ) UpperCAmelCase_ : int = encoding["pixel_values"] UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
23
1
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule _a = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ ) UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )] UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ ) UpperCAmelCase_ : Tuple = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Tuple = jax.device_count() UpperCAmelCase_ : Optional[int] = num_samples * [prompt] UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : int = replicate(lowercase_ ) UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = shard(lowercase_ ) UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(lowercase_ ) == num_samples def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ ) UpperCAmelCase_ : Optional[int] = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : Union[str, Any] = 50 UpperCAmelCase_ : List[str] = jax.device_count() UpperCAmelCase_ : List[str] = num_samples * [prompt] UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ ) # shard inputs and rng UpperCAmelCase_ : Any = replicate(lowercase_ ) UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ ) 
UpperCAmelCase_ : List[str] = shard(lowercase_ )
        UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
        UpperCAmelCase_ : Any = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
        UpperCAmelCase_ : str = 50
        UpperCAmelCase_ : List[str] = jax.device_count()
        UpperCAmelCase_ : List[Any] = num_samples * [prompt]
        UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
        # shard inputs and rng
        UpperCAmelCase_ : Dict = replicate(lowercase_ )
        UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
        UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
        UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
        UpperCAmelCase_ : List[Any] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
        UpperCAmelCase_ : Optional[int] = 50
        UpperCAmelCase_ : Optional[int] = jax.device_count()
        UpperCAmelCase_ : str = num_samples * [prompt]
        UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
        # shard inputs and rng
        UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
        UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
        UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
        UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
        UpperCAmelCase_ : List[Any] = scheduler.create_state()
        UpperCAmelCase_ : int = scheduler_state
        UpperCAmelCase_ : Union[str, Any] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
        UpperCAmelCase_ : int = 50
        UpperCAmelCase_ : str = jax.device_count()
        UpperCAmelCase_ : List[Any] = num_samples * [prompt]
        UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
        # shard inputs and rng
        UpperCAmelCase_ : int = replicate(lowercase_ )
        UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
        UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
        UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        UpperCAmelCase_ : int = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCAmelCase_ : List[str] = jax.device_count()
        UpperCAmelCase_ : List[Any] = num_samples * [prompt]
        UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
        UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
        UpperCAmelCase_ : Any = replicate(lowercase_ )
        UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
        UpperCAmelCase_ : List[str] = shard(lowercase_ )
        UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
        UpperCAmelCase_ : str = replicate(lowercase_ )
        UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
        UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
        UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1E-2
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = False ): if radian_mode: return [magnitude * cos(__lowerCamelCase ), magnitude * sin(__lowerCamelCase )] return [magnitude * cos(radians(__lowerCamelCase ) ), magnitude * sin(radians(__lowerCamelCase ) )] def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 10**-1 ): UpperCAmelCase_ : NDArray[floataa] = cross(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : float = sum(__lowerCamelCase ) return abs(__lowerCamelCase ) < eps if __name__ == "__main__": # Test to check if it works _a = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _a = 0 _a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _a = tuple[int, int] class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : int = pos_x UpperCAmelCase_ : List[Any] = pos_y UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x) UpperCAmelCase_ : Any = goal_x UpperCAmelCase_ : Dict = goal_y UpperCAmelCase_ : Any = g_cost UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = self.calculate_heuristic() UpperCAmelCase_ : Any = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase_ ) + abs(lowercase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase_ ): """simple docstring""" return self.f_cost < other.f_cost class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ ) UpperCAmelCase_ : str = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : int = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : str = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) return [self.start.pos] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = [] for action in delta: UpperCAmelCase_ : str = parent.pos_x + action[1] UpperCAmelCase_ : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = node UpperCAmelCase_ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Optional[int] = current_node.parent path.reverse() return path class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = False def UpperCamelCase__ ( self ): 
"""simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase_ , lowercase_ ) self.fwd_astar.closed_nodes.append(lowercase_ ) self.bwd_astar.closed_nodes.append(lowercase_ ) UpperCAmelCase_ : Tuple = current_bwd_node UpperCAmelCase_ : str = current_fwd_node UpperCAmelCase_ : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase_ ) else: # retrieve the best current path UpperCAmelCase_ : List[Any] = astar.open_nodes.pop( astar.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase_ ) else: astar.open_nodes.append(lowercase_ ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ ) UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _a = (0, 0) _a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _a = time.time() _a = AStar(init, goal) _a = a_star.search() _a = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") _a = time.time() _a = BidirectionalAStar(init, goal) _a = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.txt'} _a = { 'vocab_file': { 'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt', 'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt', }, } _a = { 'facebook/esm2_t6_8M_UR50D': 1_024, 'facebook/esm2_t12_35M_UR50D': 1_024, } def __a ( __lowerCamelCase ): with open(__lowerCamelCase, "r" ) as f: UpperCAmelCase_ : int = f.read().splitlines() return [l.strip() for l in lines] class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : int = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase_ , lowercase_="<unk>" , lowercase_="<cls>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_="<eos>" , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : str = load_vocab_file(lowercase_ ) UpperCAmelCase_ : Any = dict(enumerate(self.all_tokens ) ) UpperCAmelCase_ : str = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase_ : int = unk_token UpperCAmelCase_ : Optional[int] = cls_token UpperCAmelCase_ : Optional[int] = pad_token UpperCAmelCase_ : Optional[int] = mask_token UpperCAmelCase_ : List[Any] = eos_token UpperCAmelCase_ : str = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self._id_to_token.get(lowercase_ , self.unk_token ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ , **lowercase_ ): """simple docstring""" return text.split() def UpperCamelCase__ ( self , lowercase_=False ): """simple docstring""" return len(self._id_to_token ) def UpperCamelCase__ ( self ): """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens )} def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self._id_to_token.get(lowercase_ , self.unk_token ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" UpperCAmelCase_ : List[Any] = [self.cls_token_id] UpperCAmelCase_ : int = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase_ : Dict = [1] + ([0] * len(lowercase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowercase_ ) + [1] return mask def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = os.path.join(lowercase_ , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" ) with open(lowercase_ , "w" ) as f: f.write("\n".join(self.all_tokens ) ) return (vocab_file,) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.get_vocab_size(with_added_tokens=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ): """simple docstring""" return super()._add_tokens(lowercase_ , special_tokens=lowercase_ )
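
# A minimal sketch of the vocabulary round-trip performed above, using a toy
# amino-acid vocab instead of the real ESM files. Tokens map to ids and back
# through plain dict lookups, unknown tokens fall back to "<unk>", and a
# single sequence is wrapped as <cls> ... <eos>.
toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G"]
token_to_id = {tok: i for i, tok in enumerate(toy_vocab)}
unk_id = token_to_id["<unk>"]

tokens = "L A G Z".split()  # tokenization above is a whitespace split; "Z" is OOV
ids = [token_to_id.get(t, unk_id) for t in tokens]
assert ids == [4, 5, 6, 3]

# Mirror of the <cls> + tokens + <eos> wrapping implemented above.
assert [token_to_id["<cls>"]] + ids + [token_to_id["<eos>"]] == [0, 4, 5, 6, 3, 2]
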
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample 
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in 
zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
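
# A minimal sketch of the two-phase sampling loop the scheduler tests above
# drive: PNDM warms up with Runge-Kutta (PRK) steps and then switches to
# linear multistep (PLMS) steps. A fixed random tensor stands in for a real
# model's noise prediction, so this demonstrates only the scheduler
# mechanics, not actual denoising.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
fake_model_output = torch.randn(1, 3, 8, 8)

for t in scheduler.prk_timesteps:
    sample = scheduler.step_prk(fake_model_output, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    sample = scheduler.step_plms(fake_model_output, t, sample).prev_sample

assert sample.shape == (1, 3, 8, 8)
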
"""simple docstring""" import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _a = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class A_ (unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" UpperCAmelCase_ : str = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-model-flax" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" ) except HTTPError: pass def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : int = FlaxBertModel(lowercase_ ) model.push_to_hub("test-model-flax" , use_auth_token=self._token ) UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) UpperCAmelCase_ : List[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_ , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="test-model-flax" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase_ , repo_id="test-model-flax" , push_to_hub=lowercase_ , use_auth_token=self._token ) UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_ , 1E-3 , msg=F"""{key} not identical""" ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModel(lowercase_ ) model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) UpperCAmelCase_ : Any = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Dict = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_ , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( lowercase_ , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowercase_ , use_auth_token=self._token ) UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) UpperCAmelCase_ : int = 
flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_ , 1E-3 , msg=F"""{key} not identical""" ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params ) UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: UpperCAmelCase_ : int = False return models_are_equal @require_flax class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) UpperCAmelCase_ : Any = FlaxBertModel(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) ) with self.assertRaises(lowercase_ ): UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ ) self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) UpperCAmelCase_ : int = FlaxBertModel(lowercase_ ) UpperCAmelCase_ : Tuple = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) , max_shard_size="10KB" ) with self.assertRaises(lowercase_ ): UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ ) self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = "bert" UpperCAmelCase_ : List[str] = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ ) self.assertIsNotNone(lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "bert" UpperCAmelCase_ : str = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase_ ) UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ ) self.assertIsNotNone(lowercase_ )
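
# A self-contained sketch of the parameter comparison used in the tests
# above: flatten two nested parameter trees and compare them leaf by leaf.
# Toy dictionaries stand in for real Flax model params.
import numpy as np
from flax.traverse_util import flatten_dict

params_a = {"layer": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
params_b = {"layer": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}

flat_a, flat_b = flatten_dict(params_a), flatten_dict(params_b)
assert all(np.sum(np.abs(flat_a[k] - flat_b[k])) <= 1e-4 for k in flat_a)
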
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _a = object() # For specifying empty leaf dict `{}` _a = object() def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ): UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )] if matches and all(__lowerCamelCase ): return True return False def __a ( __lowerCamelCase ): def replace(__lowerCamelCase, __lowerCamelCase ): for rule, replacement in rules: if _match(__lowerCamelCase, __lowerCamelCase ): return replacement return val return replace def __a ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )), (("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )), (("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __a ( __lowerCamelCase ): UpperCAmelCase_ : List[str] = _get_partition_rules() UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase ) UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )} UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCamelCase ) )
"""simple docstring""" from __future__ import annotations def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = str(__lowerCamelCase ) return len(__lowerCamelCase ) == 9 and set(__lowerCamelCase ) == set("123456789" ) def __a ( ): for base_num in range(9999, 4999, -1 ): UpperCAmelCase_ : Tuple = 10_0002 * base_num if is_9_pandigital(__lowerCamelCase ): return candidate for base_num in range(333, 99, -1 ): UpperCAmelCase_ : int = 100_2003 * base_num if is_9_pandigital(__lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _a = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )] if identifier is not None: UpperCAmelCase_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_ ): for n_ in n_identifier: UpperCAmelCase_ : str = [file for file in files if n_ not in file] else: UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file] UpperCAmelCase_ : Union[str, Any] = ignore_files or [] ignore_files.append("__init__.py" ) UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , lowercase_ ) if only_modules: UpperCAmelCase_ : str = file.split("." )[0] try: UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ ) UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F"""{module_identifier} is not a module.""" ) else: UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = Path("src/transformers" ) UpperCAmelCase_ : str = "modeling" UpperCAmelCase_ : Optional[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = Path("src/transformers" ) UpperCAmelCase_ : Any = "tokenization" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = "configuration" self.analyze_directory(lowercase_ , identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" ) UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowercase_ , n_identifier=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = Path("docs/source" ) UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
"""simple docstring""" from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _a = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) return (preds == labels).mean() def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0] UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mrpc": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "sts-b": return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase ) elif task_name == "qqp": return acc_and_fa(__lowerCamelCase, __lowerCamelCase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): warnings.warn(__lowerCamelCase, __lowerCamelCase ) requires_backends(__lowerCamelCase, "sklearn" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )} else: raise KeyError(__lowerCamelCase )
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union _a = TypeVar('T') _a = Union[List[T], Tuple[T, ...]] _a = Union[T, List[T], Dict[str, T]] _a = Union[str, bytes, os.PathLike]
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json'} _a = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _a = {'mgp-str': 27} class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ): """simple docstring""" super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : Dict = json.load(lowercase_ ) UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return self.decoder.get(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ): """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) ) return UpperCAmelCase_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowercase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" ) return (vocab_file,)
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _a = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) _a = [] _a = [] _a = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} _a = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""", 'emoji': True, }, } ] _a = 0 for log in Path().glob('*.log'): _a = 0 with open(log, 'r') as f: for line in f: _a = json.loads(line) if line.get('nodeid', '') != "": _a = line['nodeid'] if line.get('duration', None) is not None: _a = f"""{line['duration']:.4f}""" if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _a = [] log.unlink() _a = '' _a = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" _a = [] _a = {} for test in failed_tests: _a = test[0].split('::') _a = data[0].split('/')[-1] if data[0] not in filesafailed: _a = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) _a = [test[0] for test in failed_table] _a = list(set(files)) # Count number of instances in failed_tests _a = [] for file in individual_files: table.append([file, len(filesafailed[file])]) _a = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: _a = 'Too many failed tests, please see the full report in the Action results.' _a = len(err) + 10 _a = message[: 3_000 - offset] + f"""\n...\n```\n{err}""" print(f"""### {message}""") else: _a = 'No failed tests! 🤗' print(f"""## {message}""") payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient _a = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": _a = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) _a = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } payload.append(action_button) _a = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""", } ], } payload.append(date_report) _a = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) _a = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _a = '' for i, row in enumerate(test_failures): if row[0] != test_class: _a = row[0] else: _a = '' _a = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""", }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = emb.weight.shape UpperCAmelCase_ : List[str] = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase ) UpperCAmelCase_ : Any = emb.weight.data return lin_layer def __a ( __lowerCamelCase, __lowerCamelCase=None ): UpperCAmelCase_ : str = {} for old_key in state_dict.keys(): UpperCAmelCase_ : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase_ : str = key.replace("moe_layer.experts.0", f"""ffn.experts.expert_{expert_idx}""" ) else: UpperCAmelCase_ : Union[str, Any] = key.replace("moe_layer.experts.", "ffn.experts.expert_" ) if "gate" in key: UpperCAmelCase_ : int = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier" ) if "fc2" and "experts" not in key: UpperCAmelCase_ : Tuple = key.replace(".fc2.", ".ffn.fc2." ) if "fc1" and "experts" not in key: UpperCAmelCase_ : str = key.replace(".fc1.", ".ffn.fc1." ) if ".encoder_attn." in key: UpperCAmelCase_ : Any = key.replace(".encoder_attn.", ".cross_attention." ) if "encoder_attn_layer_norm" in key: UpperCAmelCase_ : str = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm" ) if "final_layer_norm" in key: UpperCAmelCase_ : Optional[Any] = key.replace("final_layer_norm", "ff_layer_norm" ) UpperCAmelCase_ : str = state_dict[old_key] return new_dict def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = WEIGHTS_NAME ): UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : int = 0 os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase ) for expert in range(__lowerCamelCase ): UpperCAmelCase_ : str = switch_checkpoint_path + f"""-rank-{expert}.pt""" if os.path.isfile(__lowerCamelCase ): UpperCAmelCase_ : Tuple = torch.load(__lowerCamelCase )["model"] remove_ignore_keys_(__lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = rename_fairseq_keys(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Optional[int] = os.path.join( __lowerCamelCase, weights_name.replace(".bin", f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) ) torch.save(__lowerCamelCase, __lowerCamelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowerCamelCase )[0]].dtype ) # Add the last block UpperCAmelCase_ : Optional[Any] = os.path.join(__lowerCamelCase, weights_name.replace(".bin", f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) ) UpperCAmelCase_ : Union[str, Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(__lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = rename_fairseq_keys(__lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts 
saved on the same file) if len(__lowerCamelCase ) == 1: UpperCAmelCase_ : int = os.path.join(__lowerCamelCase, __lowerCamelCase ) torch.save(__lowerCamelCase, __lowerCamelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowerCamelCase, __lowerCamelCase ) # Otherwise, let's build the index UpperCAmelCase_ : Tuple = {} for idx, shard in enumerate(__lowerCamelCase ): UpperCAmelCase_ : List[str] = weights_name.replace(".bin", f"""-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin""" ) UpperCAmelCase_ : Optional[Any] = os.path.join(__lowerCamelCase, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) ) for key in shard: UpperCAmelCase_ : str = shard_file # Add the metadata UpperCAmelCase_ : int = {"total_size": total_size} UpperCAmelCase_ : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__lowerCamelCase, __lowerCamelCase ), "w", encoding="utf-8" ) as f: UpperCAmelCase_ : int = json.dumps(__lowerCamelCase, indent=2, sort_keys=__lowerCamelCase ) + "\n" f.write(__lowerCamelCase ) return metadata, index if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) _a = parser.parse_args() _a , _a = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) _a = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) _a = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
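
# A worked example of the key renaming above: fairseq stores expert weights
# under "moe_layer.experts.0", and the conversion moves them to
# "ffn.experts.expert_{idx}" when an expert index is supplied.
old_key = "decoder.layers.3.moe_layer.experts.0.fc1.weight"
expert_idx = 7
new_key = old_key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
assert new_key == "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
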
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _a = logging.getLogger() def __a ( ): UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowercase_ , "argv" , lowercase_ ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowercase_ , 0.6_66 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ ) UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowercase_ )
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=32 , lowercase_=3 , lowercase_=4 , lowercase_=[10, 20, 30, 40] , lowercase_=[2, 2, 3, 2] , lowercase_=True , lowercase_=True , lowercase_=37 , lowercase_="gelu" , lowercase_=10 , lowercase_=0.02 , lowercase_=["stage2", "stage3", "stage4"] , lowercase_=[2, 3, 4] , lowercase_=None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Union[str, Any] = num_stages UpperCAmelCase_ : List[str] = hidden_sizes UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Optional[int] = hidden_act UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : str = out_features UpperCAmelCase_ : Optional[int] = out_indices UpperCAmelCase_ : int = scope def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = ConvNextVaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowercase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = ConvNextVaForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = ConvNextVaBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowercase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCAmelCase_ : int = None UpperCAmelCase_ : str = ConvNextVaBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : int = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Dict = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = ConvNextVaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def UpperCamelCase__ ( self ): 
"""simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() UpperCAmelCase_ : List[str] = True if model_class.__name__ in [ *get_values(lowercase_ ), *get_values(lowercase_ ), ]: continue UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : int = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_with_labels() UpperCAmelCase_ : Optional[int] = False UpperCAmelCase_ : Optional[int] = True if ( model_class.__name__ in [*get_values(lowercase_ ), *get_values(lowercase_ )] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase_ : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.gradient_checkpointing_enable() model.train() UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : int = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : str = model_class(lowercase_ ) UpperCAmelCase_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[str] = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase_ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) UpperCAmelCase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = ConvNextVaModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __a ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(lowercase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : Dict = prepare_img() UpperCAmelCase_ : List[str] = preprocessor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowercase_ ) # verify the logits UpperCAmelCase_ : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) UpperCAmelCase_ : Dict = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
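# A rough sketch of the lazy-import idea behind _LazyModule above, using only
# PEP 562 module-level __getattr__ and importlib from the standard library;
# this illustrates the pattern and is not the transformers implementation.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):
    # Resolve the attribute to its backing module only on first access.
    if name in _name_to_module:
        module = importlib.import_module(_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")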
"""simple docstring""" import os import sys import unittest _a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a = os.path.join(git_repo_path, 'src', 'transformers') _a = '\n{0} = None\n' _a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' _a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(lowercase_ ) UpperCAmelCase_ : str = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(lowercase_ , "tokenizers" ) UpperCAmelCase_ : Dict = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(lowercase_ , "tensorflow_text" ) UpperCAmelCase_ : List[str] = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(lowercase_ , "sentencepiece_and_tokenizers" ) UpperCAmelCase_ : List[str] = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(lowercase_ , "sentencepiece_and_tensorflow_text" ) UpperCAmelCase_ : List[str] = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(lowercase_ , "sentencepiece_and_tokenizers_and_vision" ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , lowercase_ ) self.assertIn("tensorflow_text" , lowercase_ ) self.assertIn("sentencepiece_and_tokenizers" , lowercase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(lowercase_ , "\nCONSTANT = None\n" ) UpperCAmelCase_ : int = create_dummy_object("function" , "'torch'" ) self.assertEqual( lowercase_ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) UpperCAmelCase_ : Tuple = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" UpperCAmelCase_ : Dict = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def 
__init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n" UpperCAmelCase_ : List[Any] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , lowercase_ )
23
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : str = latents.to(lowercase_ ) UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : str = self._execution_device UpperCAmelCase_ : List[Any] = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : List[Any] = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 ) UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", 
"learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5 UpperCAmelCase_ : int = image.clamp(0 , 1 ) UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
23
1
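# A worked check of the rounding in downscale_height_and_width from the
# Kandinsky pipeline above (duplicated here verbatim so the example is
# self-contained), assuming the pipeline's movq scale factor of 8.
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


assert downscale_height_and_width(768, 768) == (96, 96)   # 768 // 8**2 = 12, then 12 * 8
assert downscale_height_and_width(770, 768) == (104, 96)  # 770 is not a multiple of 64, so it rounds up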
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase ): return price * (1 + tax_rate) if __name__ == "__main__": print(f"""{price_plus_tax(100, 0.25) = }""") print(f"""{price_plus_tax(125.50, 0.05) = }""")
23
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """detr""" SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = backbone_config.get("model_type" ) UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = num_queries UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim UpperCAmelCase_ : Optional[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : Optional[int] = dropout UpperCAmelCase_ : List[str] = attention_dropout UpperCAmelCase_ : Any = activation_dropout UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Optional[Any] = init_xavier_std UpperCAmelCase_ : Optional[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = decoder_layerdrop UpperCAmelCase_ : Tuple = encoder_layers UpperCAmelCase_ : int = auxiliary_loss UpperCAmelCase_ : Optional[Any] = position_embedding_type UpperCAmelCase_ : Tuple = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Dict = dilation # Hungarian matcher UpperCAmelCase_ : Union[str, Any] = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : str = mask_loss_coefficient UpperCAmelCase_ : Any = dice_loss_coefficient UpperCAmelCase_ : Optional[Any] = 
bbox_loss_coefficient UpperCAmelCase_ : List[str] = giou_loss_coefficient UpperCAmelCase_ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model @classmethod def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ): """simple docstring""" return cls(backbone_config=lowercase_ , **lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : str = self.__class__.model_type return output class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-5 @property def UpperCamelCase__ ( self ): """simple docstring""" return 12
23
1
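# A minimal usage sketch for the DetrConfig class above, assuming an installed
# transformers build that exports it; the values are illustrative, not
# recommended defaults.
from transformers import DetrConfig

config = DetrConfig(encoder_attention_heads=16, d_model=512)

# attribute_map routes the generic names onto the DETR-specific fields
assert config.num_attention_heads == 16  # property backed by encoder_attention_heads
assert config.hidden_size == 512         # property backed by d_model

serialized = config.to_dict()
assert serialized["model_type"] == "detr"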