Dataset schema:

  column                   type     values
  code                     string   lengths 87 to 55.2k
  code_codestyle           int64    0 to 349
  style_context            string   lengths 135 to 49.1k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
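Each record below follows this schema. As a minimal sketch of how a dataset with these columns could be loaded and inspected with the `datasets` library; the repository id used here is a placeholder, not the actual dataset name:

# Minimal loading sketch; "user/code-style-dataset" is a hypothetical repo id.
from datasets import load_dataset

ds = load_dataset("user/code-style-dataset", split="train")  # hypothetical id
print(ds.features)          # code, code_codestyle, style_context, style_context_codestyle, label
print(ds[0]["code"][:200])  # preview the first 200 characters of the first code field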
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A_ : '''simple docstring''' @staticmethod def UpperCamelCase__ ( *lowercase_ , **lowercase_ ): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class A_ (unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) UpperCAmelCase_ : List[str] = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = vqa_pipeline(lowercase_ , top_k=1 ) self.assertEqual( lowercase_ , [ [{"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}], [{"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}], ] , ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) UpperCAmelCase_ : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCAmelCase_ : Any = "How many cats are there?" UpperCAmelCase_ : List[str] = vqa_pipeline(image=lowercase_ , question="How many cats are there?" , top_k=2 ) self.assertEqual( lowercase_ , [{"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}, {"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}] ) UpperCAmelCase_ : str = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( lowercase_ , [{"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}, {"score": ANY(lowercase_ ), "answer": ANY(lowercase_ )}] ) @slow @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) UpperCAmelCase_ : str = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCAmelCase_ : List[Any] = "How many cats are there?" UpperCAmelCase_ : str = vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] ) UpperCAmelCase_ : Tuple = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] ) UpperCAmelCase_ : int = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCamelCase__ ( self ): """simple docstring""" pass
61
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = pos_x SCREAMING_SNAKE_CASE : Any = pos_y SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Tuple = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowerCamelCase_ ) self.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCamelCase_ ) else: self.open_nodes.append(lowerCamelCase_ ) return [self.start.pos] def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) ) return successors def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = node SCREAMING_SNAKE_CASE : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) self.fwd_astar.closed_nodes.append(lowerCamelCase_ ) self.bwd_astar.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node SCREAMING_SNAKE_CASE : Any = current_fwd_node SCREAMING_SNAKE_CASE : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop( astar.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCamelCase_ ) else: astar.open_nodes.append(lowerCamelCase_ ) return [self.fwd_astar.start.pos] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
323
0
code:

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
    __UpperCamelCase =filter(lambda SCREAMING_SNAKE_CASE__ : p.requires_grad , model.parameters() )
    __UpperCamelCase =sum([np.prod(p.size() ) for p in model_parameters] )
    return params


_A = logging.getLogger(__name__)


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
    if metric == "rouge2":
        __UpperCamelCase ='{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        __UpperCamelCase ='{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        __UpperCamelCase ='{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.'
        )

    __UpperCamelCase =ModelCheckpoint(
        dirpath=SCREAMING_SNAKE_CASE__ ,
        filename=SCREAMING_SNAKE_CASE__ ,
        monitor=F'val_{metric}' ,
        mode='max' ,
        save_top_k=3 ,
        every_n_epochs=1 ,
    )

    return checkpoint_callback


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
    return EarlyStopping(
        monitor=F'val_{metric}' ,
        mode='min' if 'loss' in metric else 'max' ,
        patience=SCREAMING_SNAKE_CASE__ ,
        verbose=SCREAMING_SNAKE_CASE__ ,
    )


class UpperCAmelCase__ ( pl.Callback ):
    """simple docstring"""

    def _a ( self , A_ , A_ ) -> Dict:
        __UpperCamelCase ={f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(A_ )

    @rank_zero_only
    def _a ( self , A_ , A_ , A_ , A_=True ) -> None:
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        __UpperCamelCase =trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        __UpperCamelCase =Path(pl_module.hparams.output_dir )
        if type_path == "test":
            __UpperCamelCase =od / 'test_results.txt'
            __UpperCamelCase =od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            __UpperCamelCase =od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            __UpperCamelCase =od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=A_ )
            generations_file.parent.mkdir(exist_ok=A_ )
        with open(A_ , 'a+' ) as writer:
            for key in sorted(A_ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                __UpperCamelCase =metrics[key]
                if isinstance(A_ , torch.Tensor ):
                    __UpperCamelCase =val.item()
                __UpperCamelCase =f'{key}: {val:.6f}\n'
                writer.write(A_ )

        if not save_generations:
            return

        if "preds" in metrics:
            __UpperCamelCase ='\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(A_ )

    @rank_zero_only
    def _a ( self , A_ , A_ ) -> Tuple:
        try:
            __UpperCamelCase =pl_module.model.model.num_parameters()
        except AttributeError:
            __UpperCamelCase =pl_module.model.num_parameters()

        __UpperCamelCase =count_trainable_parameters(A_ )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )

    @rank_zero_only
    def _a ( self , A_ , A_ ) -> List[str]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(A_ , A_ , 'test' )

    @rank_zero_only
    def _a ( self , A_ , A_ ) -> Union[str, Any]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")

code_codestyle: 62

style_context:

'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = '''efficientnet'''

    def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ):
        '''simple docstring'''
        super().__init__(**lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
        SCREAMING_SNAKE_CASE : int = image_size
        SCREAMING_SNAKE_CASE : int = width_coefficient
        SCREAMING_SNAKE_CASE : List[str] = depth_coefficient
        SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor
        SCREAMING_SNAKE_CASE : List[str] = kernel_sizes
        SCREAMING_SNAKE_CASE : Dict = in_channels
        SCREAMING_SNAKE_CASE : List[str] = out_channels
        SCREAMING_SNAKE_CASE : Any = depthwise_padding
        SCREAMING_SNAKE_CASE : Dict = strides
        SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats
        SCREAMING_SNAKE_CASE : Any = expand_ratios
        SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
        SCREAMING_SNAKE_CASE : List[str] = hidden_act
        SCREAMING_SNAKE_CASE : Dict = hidden_dim
        SCREAMING_SNAKE_CASE : List[str] = pooling_type
        SCREAMING_SNAKE_CASE : List[Any] = initializer_range
        SCREAMING_SNAKE_CASE : Any = batch_norm_eps
        SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
        SCREAMING_SNAKE_CASE : Dict = dropout_rate
        SCREAMING_SNAKE_CASE : int = drop_connect_rate
        SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )

    @property
    def lowerCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        return 1e-5

style_context_codestyle: 323

label: 0
code:

'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


lowerCAmelCase_ : Any = logging.get_logger(__name__)

lowerCAmelCase_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

lowerCAmelCase_ : List[str] = [
    'small',
    'small-base',
    'medium',
    'medium-base',
    'intermediate',
    'intermediate-base',
    'large',
    'large-base',
    'xlarge',
    'xlarge-base',
]

lowerCAmelCase_ : str = {
    'vocab_file': {
        'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
        'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
        'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
        'funnel-transformer/medium-base': (
            'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
        ),
        'funnel-transformer/intermediate': (
            'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
        ),
        'funnel-transformer/intermediate-base': (
            'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
        ),
        'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
        'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
        'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
        'funnel-transformer/xlarge-base': (
            'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
        'funnel-transformer/small-base': (
            'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
        'funnel-transformer/medium-base': (
            'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/intermediate': (
            'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/intermediate-base': (
            'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
        'funnel-transformer/large-base': (
            'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
        'funnel-transformer/xlarge-base': (
            'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
        ),
    },
}

lowerCAmelCase_ : List[Any] = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
lowerCAmelCase_ : List[Any] = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}


class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
    """simple docstring"""

    __a =VOCAB_FILES_NAMES
    __a =PRETRAINED_VOCAB_FILES_MAP
    __a =PRETRAINED_INIT_CONFIGURATION
    __a =FunnelTokenizer
    __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a =2

    def __init__( self : str , __a : Optional[Any]=None , __a : Optional[Any]=None , __a : List[str]=True , __a : str="<unk>" , __a : List[Any]="<sep>" , __a : Optional[int]="<pad>" , __a : str="<cls>" , __a : Union[str, Any]="<mask>" , __a : Dict="<s>" , __a : Tuple="</s>" , __a : Union[str, Any]=True , __a : str=True , __a : int=None , __a : int="##" , **__a : List[Any] , ):
        super().__init__(
            __a ,
            tokenizer_file=__a ,
            do_lower_case=__a ,
            unk_token=__a ,
            sep_token=__a ,
            pad_token=__a ,
            cls_token=__a ,
            mask_token=__a ,
            bos_token=__a ,
            eos_token=__a ,
            clean_text=__a ,
            tokenize_chinese_chars=__a ,
            strip_accents=__a ,
            wordpieces_prefix=__a ,
            **__a ,
        )

        _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , __a ) != do_lower_case
            or normalizer_state.get("strip_accents" , __a ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , __a ) != tokenize_chinese_chars
        ):
            _a = getattr(__a , normalizer_state.pop("type" ) )
            _a = do_lower_case
            _a = strip_accents
            _a = tokenize_chinese_chars
            _a = normalizer_class(**__a )

        _a = do_lower_case

    def UpperCamelCase__ ( self : Any , __a : Any , __a : Dict=None ):
        _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def UpperCamelCase__ ( self : str , __a : List[int] , __a : Optional[List[int]] = None ):
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCamelCase__ ( self : str , __a : str , __a : Optional[str] = None ):
        _a = self._tokenizer.model.save(__a , name=__a )
        return tuple(__a )

code_codestyle: 63

style_context:

'''simple docstring'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

__UpperCAmelCase = logging.get_logger(__name__)


@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
        '''simple docstring'''
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = {}
        SCREAMING_SNAKE_CASE : List[Any] = {}

        if prompt is not None:
            SCREAMING_SNAKE_CASE : List[Any] = prompt
        if generate_kwargs is not None:
            SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                SCREAMING_SNAKE_CASE : Union[str, Any] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one"""
                )
            SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
        '''simple docstring'''
        return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )

        if prompt is not None:
            if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation."""
                )

            SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type

            if model_type == "git":
                SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
                SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
                SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
                SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
                SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
                model_inputs.update(lowerCamelCase_ )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            SCREAMING_SNAKE_CASE : Optional[Any] = None

        return model_inputs

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            SCREAMING_SNAKE_CASE : List[str] = None

        if generate_kwargs is None:
            SCREAMING_SNAKE_CASE : int = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
        SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
        return model_outputs

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = []
        for output_ids in model_outputs:
            SCREAMING_SNAKE_CASE : List[Any] = {
                """generated_text""": self.tokenizer.decode(
                    lowerCamelCase_ ,
                    skip_special_tokens=lowerCamelCase_ ,
                )
            }
            records.append(lowerCamelCase_ )
        return records

style_context_codestyle: 323

label: 0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["note_seq"] def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ): '''simple docstring''' requires_backends(self, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] )
64
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
323
0
code:

from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path

from utils import calculate_rouge


UpperCamelCase__ = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
    ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    ' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
    'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
    ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
    ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
    ' body.',
    'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
    ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
    ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
    ' punishment.',
]

UpperCamelCase__ = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    ' had informed his Lufthansa training school of an episode of severe depression, airline says .',
    'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
    ' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
    ' Israelis .',
    'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
    ' death . Organization claims that governments around the world are using the threat of terrorism to advance'
    ' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
    ' sentences up by 28% .',
]


def lowerCAmelCase_ ( ) -> Optional[Any]:
    '''simple docstring'''
    UpperCAmelCase__ = calculate_rouge(__A, __A, bootstrap_aggregation=__A, rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(__A, __A )
    UpperCAmelCase__ = calculate_rouge(__A, __A, bootstrap_aggregation=__A, rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
    )


def lowerCAmelCase_ ( ) -> Tuple:
    '''simple docstring'''
    UpperCAmelCase__ = "rougeLsum"
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=[k] )[k]
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=[k] )[k]
    assert score > score_no_sep


def lowerCAmelCase_ ( ) -> str:
    '''simple docstring'''
    UpperCAmelCase__ = ["rouge1", "rouge2", "rougeL"]
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=__A )
    UpperCAmelCase__ = calculate_rouge(__A, __A, newline_sep=__A, rouge_keys=__A )
    assert score_sep == score_no_sep


def lowerCAmelCase_ ( ) -> int:
    '''simple docstring'''
    UpperCAmelCase__ = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    UpperCAmelCase__ = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(__A, __A, newline_sep=__A ) == calculate_rouge(__A, __A, newline_sep=__A )


def lowerCAmelCase_ ( ) -> Tuple:
    '''simple docstring'''
    UpperCAmelCase__ = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    UpperCAmelCase__ = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    UpperCAmelCase__ = calculate_rouge(__A, __A, rouge_keys=["rougeLsum"], newline_sep=__A )["rougeLsum"]
    UpperCAmelCase__ = calculate_rouge(__A, __A, rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score


def lowerCAmelCase_ ( ) -> Optional[int]:
    '''simple docstring'''
    UpperCAmelCase__ = Path("examples/seq2seq/test_data/wmt_en_ro" )
    UpperCAmelCase__ = calculate_rouge_path(data_dir.joinpath("test.source" ), data_dir.joinpath("test.target" ) )
    assert isinstance(__A, __A )
    UpperCAmelCase__ = calculate_rouge_path(
        data_dir.joinpath("test.source" ), data_dir.joinpath("test.target" ), bootstrap_aggregation=__A
    )
    assert isinstance(__A, __A )

code_codestyle: 65

style_context:

'''simple docstring'''
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
        '''simple docstring'''
        super().__init__(
            lowerCamelCase_ ,
            split=lowerCamelCase_ ,
            features=lowerCamelCase_ ,
            cache_dir=lowerCamelCase_ ,
            keep_in_memory=lowerCamelCase_ ,
            streaming=lowerCamelCase_ ,
            num_proc=lowerCamelCase_ ,
            **lowerCamelCase_ ,
        )
        SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
        SCREAMING_SNAKE_CASE : Optional[int] = Text(
            cache_dir=lowerCamelCase_ ,
            data_files=lowerCamelCase_ ,
            features=lowerCamelCase_ ,
            **lowerCamelCase_ ,
        )

    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        if self.streaming:
            SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            SCREAMING_SNAKE_CASE : List[str] = None
            SCREAMING_SNAKE_CASE : Union[str, Any] = None
            SCREAMING_SNAKE_CASE : Optional[int] = None
            SCREAMING_SNAKE_CASE : List[str] = None

            self.builder.download_and_prepare(
                download_config=lowerCamelCase_ ,
                download_mode=lowerCamelCase_ ,
                verification_mode=lowerCamelCase_ ,
                base_path=lowerCamelCase_ ,
                num_proc=self.num_proc ,
            )
            SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
                split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory
            )
        return dataset

style_context_codestyle: 323

label: 0
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(_lowercase ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
66
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
0
code:

'''simple docstring'''
import numpy as np


def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
    __lowerCamelCase = int(np.ceil((x_end - xa) / h ) )
    __lowerCamelCase = np.zeros((n + 1,) )
    __lowerCamelCase = ya
    __lowerCamelCase = xa

    for k in range(UpperCamelCase__ ):
        __lowerCamelCase = f(UpperCamelCase__ , y[k] )
        __lowerCamelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        __lowerCamelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        __lowerCamelCase = f(x + h , y[k] + h * ka )
        __lowerCamelCase = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 67

style_context:

'''simple docstring'''
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """simple docstring"""

    def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
        '''simple docstring'''
        super().__init__()
        SCREAMING_SNAKE_CASE : Any = pad_token_id
        SCREAMING_SNAKE_CASE : List[Any] = max_length
        SCREAMING_SNAKE_CASE : Optional[int] = vocab
        SCREAMING_SNAKE_CASE : List[Any] = merges
        SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )

    @classmethod
    def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
        return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )

    @classmethod
    def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
        return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )

    @classmethod
    def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
        '''simple docstring'''
        return cls(**lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length

            if max_length is not None:
                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
                    lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}

style_context_codestyle: 323

label: 0
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
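The _pad method above reduces to a simple recipe: build an all-ones attention mask over the real positions, then pad both the values and the mask on the configured side. A minimal numpy sketch of right-padding; the helper name pad_sequence is a hypothetical stand-in, not part of the class above.

import numpy as np

def pad_sequence(values, max_length, padding_value=0.0):
    # real positions get mask 1, padding gets 0, mirroring _pad above
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    values = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    attention_mask = np.pad(attention_mask, (0, difference))
    return values, attention_mask

vals, mask = pad_sequence(np.array([0.1, 0.2, 0.3]), max_length=5)
print(vals)  # [0.1 0.2 0.3 0.  0. ]
print(mask)  # [1 1 1 0 0]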
68
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train""" SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Union[str, Any] = streaming SCREAMING_SNAKE_CASE : Optional[int] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : int = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Tuple = streaming SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Dict ): '''simple docstring''' pass
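The readers above only fix a constructor and an abstract read hook. A minimal sketch of how a concrete reader slots into that shape; the readable class names here are assumptions for illustration, and the stand-in read body skips real dataset construction.

from abc import ABC, abstractmethod

class AbstractDatasetReader(ABC):
    def __init__(self, path_or_paths, split=None, **kwargs):
        self.path_or_paths = path_or_paths
        # default to "train" when no split is given, as the reader above does
        self.split = split if split else "train"
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        ...

class JsonLinesReader(AbstractDatasetReader):
    def read(self):
        # stand-in body; a real reader would build and return a Dataset here
        return {"split": self.split, "source": self.path_or_paths}

print(JsonLinesReader("data.jsonl").read())  # {'split': 'train', 'source': 'data.jsonl'}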
323
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = 42 class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self, lowerCAmelCase__ = 6_5536, lowerCAmelCase__ = None, lowerCAmelCase__ = 2, lowerCAmelCase__ = 2, lowerCAmelCase__ = 0, lowerCAmelCase__ = "fourier", lowerCAmelCase__ = True, lowerCAmelCase__ = False, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), lowerCAmelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), lowerCAmelCase__ = "UNetMidBlock1D", lowerCAmelCase__ = None, lowerCAmelCase__ = (32, 32, 64), lowerCAmelCase__ = None, lowerCAmelCase__ = 8, lowerCAmelCase__ = 1, lowerCAmelCase__ = False, ) -> Union[str, Any]: super().__init__() snake_case_ = sample_size # time if time_embedding_type == "fourier": snake_case_ = GaussianFourierProjection( embedding_size=8, set_W_to_weight=lowerCAmelCase__, log=lowerCAmelCase__, flip_sin_to_cos=lowerCAmelCase__) snake_case_ = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case_ = Timesteps( block_out_channels[0], flip_sin_to_cos=lowerCAmelCase__, downscale_freq_shift=lowerCAmelCase__) snake_case_ = block_out_channels[0] if use_timestep_embedding: snake_case_ = block_out_channels[0] * 4 snake_case_ = TimestepEmbedding( in_channels=lowerCAmelCase__, time_embed_dim=lowerCAmelCase__, act_fn=lowerCAmelCase__, out_dim=block_out_channels[0], ) snake_case_ = nn.ModuleList([]) snake_case_ = None snake_case_ = nn.ModuleList([]) snake_case_ = None # down snake_case_ = in_channels for i, down_block_type in enumerate(lowerCAmelCase__): snake_case_ = output_channel snake_case_ = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case_ = i == len(lowerCAmelCase__) - 1 snake_case_ = get_down_block( lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, ) self.down_blocks.append(lowerCAmelCase__) # mid snake_case_ = get_mid_block( lowerCAmelCase__, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=lowerCAmelCase__, add_downsample=lowerCAmelCase__, ) # up snake_case_ = list(reversed(lowerCAmelCase__)) snake_case_ = reversed_block_out_channels[0] if out_block_type is None: snake_case_ = out_channels else: snake_case_ = block_out_channels[0] for i, up_block_type in enumerate(lowerCAmelCase__): snake_case_ = output_channel snake_case_ = ( reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase__) - 1 else final_upsample_channels ) snake_case_ = i == len(lowerCAmelCase__) - 1 snake_case_ = get_up_block( lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(lowerCAmelCase__) snake_case_ = output_channel # out snake_case_ = norm_num_groups if norm_num_groups is 
not None else min(block_out_channels[0] // 4, 32) snake_case_ = get_out_block( out_block_type=lowerCAmelCase__, num_groups_out=lowerCAmelCase__, embed_dim=block_out_channels[0], out_channels=lowerCAmelCase__, act_fn=lowerCAmelCase__, fc_dim=block_out_channels[-1] // 4, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[UNetaDOutput, Tuple]: snake_case_ = timestep if not torch.is_tensor(lowerCAmelCase__): snake_case_ = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0: snake_case_ = timesteps[None].to(sample.device) snake_case_ = self.time_proj(lowerCAmelCase__) if self.config.use_timestep_embedding: snake_case_ = self.time_mlp(lowerCAmelCase__) else: snake_case_ = timestep_embed[..., None] snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down snake_case_ = () for downsample_block in self.down_blocks: snake_case_ , snake_case_ = downsample_block(hidden_states=lowerCAmelCase__, temb=lowerCAmelCase__) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case_ = self.mid_block(lowerCAmelCase__, lowerCAmelCase__) # 4. up for i, upsample_block in enumerate(self.up_blocks): snake_case_ = down_block_res_samples[-1:] snake_case_ = down_block_res_samples[:-1] snake_case_ = upsample_block(lowerCAmelCase__, res_hidden_states_tuple=lowerCAmelCase__, temb=lowerCAmelCase__) # 5. post-process if self.out_block: snake_case_ = self.out_block(lowerCAmelCase__, lowerCAmelCase__) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCAmelCase__)
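One easy-to-miss step in the forward pass above is how a per-sample timestep embedding of shape (batch, channels) is stretched across the length axis before it can be combined with a (batch, channels, length) signal. A small torch sketch of that broadcast:

import torch

batch, channels, length = 2, 32, 16
timestep_embed = torch.randn(batch, channels)   # one embedding per sample
sample = torch.randn(batch, channels, length)

# add a length axis, then repeat along it, as the forward pass above does
timestep_embed = timestep_embed[..., None].repeat(1, 1, length)
assert timestep_embed.shape == (batch, channels, length)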
69
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
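The npz round-trip in the test above is plain numpy: savez writes a dict of arrays to one file, and load hands them back by key. A self-contained sketch of the same round-trip for a voice preset:

import os
import tempfile

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}

path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)

loaded = np.load(path)
for key in voice_preset:
    assert np.array_equal(loaded[key], voice_preset[key])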
323
0
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" for nxt, d in graph[v]: if nxt in visited_forward: continue _lowerCAmelCase = cst_fwd.get(lowerCAmelCase , np.inf ) _lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _lowerCAmelCase = new_cost_f _lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = -1 _lowerCAmelCase = set() _lowerCAmelCase = set() _lowerCAmelCase = {source: 0} _lowerCAmelCase = {destination: 0} _lowerCAmelCase = {source: None} _lowerCAmelCase = {destination: None} _lowerCAmelCase = PriorityQueue() _lowerCAmelCase = PriorityQueue() _lowerCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _lowerCAmelCase , _lowerCAmelCase = queue_forward.get() visited_forward.add(lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = queue_backward.get() visited_backward.add(lowerCAmelCase ) _lowerCAmelCase = pass_and_relaxation( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) _lowerCAmelCase = pass_and_relaxation( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _lowerCAmelCase = shortest_distance return shortest_path_distance A__ : Optional[int] ={ '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__ : int ={ '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
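The module above wires a forward and a backward search together, but a plain one-directional Dijkstra over the same forward graph makes a handy cross-check. A minimal sketch (the readable names are assumptions; the adjacency list is copied from the module), which finds E -> G -> F with distance 3:

from queue import PriorityQueue

graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}

def dijkstra(graph, source, destination):
    dist = {source: 0}
    queue = PriorityQueue()
    queue.put((0, source))
    while not queue.empty():
        d, v = queue.get()
        if v == destination:
            return d  # smallest distance pops first, so this is optimal
        for nxt, w in graph[v]:
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                queue.put((d + w, nxt))
    return -1

assert dijkstra(graph_fwd, "E", "F") == 3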
70
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
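The accuracy metric in the script above is just an elementwise comparison followed by a mean, fed by an argmax over the logits. A tiny self-contained sketch mirroring compute_metrics:

import numpy as np

def simple_accuracy(preds, labels):
    return (preds == labels).mean()

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # toy predictions
labels = np.array([1, 0, 0])

preds = np.argmax(logits, axis=1)      # -> [1, 0, 1]
print(simple_accuracy(preds, labels))  # 0.666...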
323
0
from ..utils import DummyObject, requires_backends class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : List[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Union[str, Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Tuple =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : List[str] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[int] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : str =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : 
List[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : List[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[int] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) class __A ( metaclass=a ): """simple docstring""" UpperCamelCase__ : Optional[Any] =["""flax"""] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def __lowercase ( cls , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" requires_backends(cls , ['flax'] )
71
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
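The heart of the quantizer above is a nearest-neighbour lookup: flatten the latents, compute pairwise distances to the codebook with torch.cdist, and take the argmin. A toy torch sketch of just that step, with made-up sizes:

import torch

codebook = torch.randn(16, 4)     # n_e = 16 codes, vq_embed_dim = 4
z_flattened = torch.randn(10, 4)  # 10 latent vectors to quantize

indices = torch.argmin(torch.cdist(z_flattened, codebook), dim=1)
z_q = codebook[indices]           # quantized vectors, shape (10, 4)

assert z_q.shape == z_flattened.shape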
323
0
"""simple docstring""" import pprint import requests lowerCAmelCase__ = '''https://zenquotes.io/api''' def snake_case_ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case_ ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": lowerCAmelCase__ = random_quotes() pprint.pprint(response)
72
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : str = 3 SCREAMING_SNAKE_CASE : List[Any] = (32, 32) SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input return init_dict, inputs_dict
323
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a ={ """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =[ """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoForCausalLM""", """GPTNeoForQuestionAnswering""", """GPTNeoForSequenceClassification""", """GPTNeoForTokenClassification""", """GPTNeoModel""", """GPTNeoPreTrainedModel""", """load_tf_weights_in_gpt_neo""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =[ """FlaxGPTNeoForCausalLM""", """FlaxGPTNeoModel""", """FlaxGPTNeoPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
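The import table above defers heavy imports until a symbol is actually touched. The same effect is available in plain Python via a module-level __getattr__ (PEP 562); a minimal sketch independent of the transformers _LazyModule machinery, using json as a stand-in for an expensive submodule:

# lazy_math.py - a minimal module-level lazy import, sketching the idea only
_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    import importlib
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# >>> import lazy_math          # instant: nothing heavy is imported yet
# >>> lazy_math.dumps({"a": 1}) # json is imported only at this first access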
73
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' pass def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ ) import datasets SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) SCREAMING_SNAKE_CASE : Any = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , lowerCamelCase_ , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large""" SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] ) # This seems flaky. 
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
323
0
"""simple docstring""" from __future__ import annotations _lowercase = [True] * 1_00_00_01 _lowercase = 2 while i * i <= 1_00_00_00: if seive[i]: for j in range(i * i, 1_00_00_01, i): _lowercase = False i += 1 def _snake_case ( snake_case__ : int ): return seive[n] def _snake_case ( snake_case__ : int ): return any(digit in '02468' for digit in str(snake_case__ ) ) def _snake_case ( snake_case__ : int = 100_0000 ): A = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(snake_case__ ) and not contains_an_even_digit(snake_case__ ): A = str(snake_case__ ) A = [int(str_num[j:] + str_num[:j] ) for j in range(len(snake_case__ ) )] if all(is_prime(snake_case__ ) for i in list_nums ): result.append(snake_case__ ) return result def _snake_case ( ): return len(find_circular_primes() ) if __name__ == "__main__": print(F"""{len(find_circular_primes()) = }""")
74
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
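# Editor's sketch (assumptions: a recent transformers release with dict-valued size,
# plus Pillow and torch installed). The three tests above all reduce to the same
# shape contract: whatever the input resolution, the processor resizes to the
# configured size and returns (batch, channels, height, width). A standalone check:
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (40, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert tuple(pixel_values.shape) == (1, 3, 18, 18)  # resized regardless of input size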
323
0
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) a_ : Tuple = logging.getLogger(__name__) @dataclass(frozen=lowerCamelCase__ ) class __UpperCamelCase : lowercase : str lowercase : str lowercase : Optional[str] =None lowercase : Optional[str] =None lowercase : Optional[str] =None @dataclass(frozen=lowerCamelCase__ ) class __UpperCamelCase : lowercase : List[int] lowercase : Optional[List[int]] =None lowercase : Optional[List[int]] =None lowercase : Optional[Union[int, float]] =None lowercase : Optional[int] =None if is_torch_available(): import torch from torch.utils.data import Dataset class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[InputFeatures] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase=False, lowerCAmelCase = False, ): """simple docstring""" lowerCamelCase_ =hans_processors[task]() lowerCamelCase_ =os.path.join( lowerCAmelCase, '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''', tokenizer.__class__.__name__, str(lowerCAmelCase ), lowerCAmelCase, ), ) lowerCamelCase_ =processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase_, lowerCamelCase_ =label_list[2], label_list[1] lowerCamelCase_ =label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCamelCase_ =cached_features_file + '''.lock''' with FileLock(lowerCAmelCase ): if os.path.exists(lowerCAmelCase ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) lowerCamelCase_ =torch.load(lowerCAmelCase ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) lowerCamelCase_ =( processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase ) ) logger.info('''Training examples: %s''', len(lowerCAmelCase ) ) lowerCamelCase_ =hans_convert_examples_to_features(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) logger.info('''Saving features into cached file %s''', lowerCAmelCase ) torch.save(self.features, lowerCAmelCase ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" return self.features[i] def lowercase__ ( self ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class __UpperCamelCase : lowercase : List[InputFeatures] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 128, lowerCAmelCase=False, lowerCAmelCase = False, ): """simple docstring""" lowerCamelCase_ =hans_processors[task]() lowerCamelCase_ =processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase_, lowerCamelCase_ =label_list[2], label_list[1] lowerCamelCase_ =label_list lowerCamelCase_ =processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase ) lowerCamelCase_ =hans_convert_examples_to_features(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ), desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(lowerCAmelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowerCamelCase_ =tf.data.Dataset.from_generator( lowerCAmelCase, ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ), ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ), ) def lowercase__ ( self ): """simple docstring""" return self.dataset def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" return self.features[i] def lowercase__ ( self ): """simple docstring""" return self.label_list class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase, '''heuristics_train_set.txt''' ) ), '''train''' ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase, '''heuristics_evaluation_set.txt''' ) ), '''dev''' ) def lowercase__ ( self ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple 
docstring""" lowerCamelCase_ =[] for i, line in enumerate(lowerCAmelCase ): if i == 0: continue lowerCamelCase_ ='''%s-%s''' % (set_type, line[0]) lowerCamelCase_ =line[5] lowerCamelCase_ =line[6] lowerCamelCase_ =line[7][2:] if line[7].startswith('''ex''' ) else line[7] lowerCamelCase_ =line[0] examples.append(InputExample(guid=lowerCAmelCase, text_a=lowerCAmelCase, text_b=lowerCAmelCase, label=lowerCAmelCase, pairID=lowerCAmelCase ) ) return examples def a_ ( __snake_case : List[InputExample] , __snake_case : List[str] , __snake_case : int , __snake_case : PreTrainedTokenizer , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ ={label: i for i, label in enumerate(__snake_case )} lowerCamelCase_ =[] for ex_index, example in tqdm.tqdm(enumerate(__snake_case ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) lowerCamelCase_ =tokenizer( example.text_a , example.text_b , add_special_tokens=__snake_case , max_length=__snake_case , padding='''max_length''' , truncation=__snake_case , return_overflowing_tokens=__snake_case , ) lowerCamelCase_ =label_map[example.label] if example.label in label_map else 0 lowerCamelCase_ =int(example.pairID ) features.append(InputFeatures(**__snake_case , label=__snake_case , pairID=__snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(F'''guid: {example}''' ) logger.info(F'''features: {features[i]}''' ) return features a_ : int = { """hans""": 3, } a_ : List[str] = { """hans""": HansProcessor, }
75
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    """config""": [
        """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
        """OnnxConfig""",
        """OnnxConfigWithPast""",
        """OnnxSeq2SeqConfigWithPast""",
        """PatchingSpec""",
    ],
    """convert""": ["""export""", """validate_model_outputs"""],
    """features""": ["""FeaturesManager"""],
    """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
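# Editor's sketch (assumption: a simplified stand-in, not transformers' actual
# _LazyModule). The mapping above defers heavy submodule imports until an attribute
# is first touched; PEP 562 module-level __getattr__ gives the same behaviour in
# plain Python, here demonstrated with stdlib modules:
import importlib

_import_structure = {"json": ["dumps"], "hashlib": ["md5"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # runs only on first access; the result is cached in the module namespace
    if name in _attr_to_module:
        value = getattr(importlib.import_module(_attr_to_module[name]), name)
        globals()[name] = value
        return value
    raise AttributeError(name)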
323
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _UpperCamelCase ( __A ): '''simple docstring''' @slow @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE : Any = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE : int = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.sep_token_id SCREAMING_SNAKE_CASE : Tuple = tokenizer.cls_token_id SCREAMING_SNAKE_CASE : Optional[int] = 128 SCREAMING_SNAKE_CASE : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE : Optional[int] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE : List[str] = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE : Dict = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE : str = 4 def _map_to_encoder_decoder_inputs(a : List[str] ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE : Any = tokenizer(batch["article"] , padding="max_length" , truncation=a , max_length=512 ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(batch["highlights"] , padding="max_length" , truncation=a , max_length=128 ) SCREAMING_SNAKE_CASE : Optional[Any] = inputs.input_ids SCREAMING_SNAKE_CASE : List[str] = inputs.attention_mask SCREAMING_SNAKE_CASE : str = outputs.input_ids SCREAMING_SNAKE_CASE : List[str] = outputs.input_ids.copy() SCREAMING_SNAKE_CASE : List[str] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE : Any = outputs.attention_mask assert all(len(a ) == 512 for x in inputs.input_ids ) assert all(len(a ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(a : int ): SCREAMING_SNAKE_CASE : Dict = pred.label_ids SCREAMING_SNAKE_CASE : Optional[Any] = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_decode(a , skip_special_tokens=a ) SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(a , skip_special_tokens=a ) SCREAMING_SNAKE_CASE : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a ) )] ) / len(a ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE : Optional[Any] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=a , batch_size=a , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE : Union[str, Any] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=a , batch_size=a , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : Any = SeqaSeqTrainingArguments( output_dir=a , per_device_train_batch_size=a , per_device_eval_batch_size=a , predict_with_generate=a , evaluation_strategy="steps" , do_train=a , do_eval=a , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE : Optional[Any] = SeqaSeqTrainer( model=a , args=a , compute_metrics=_compute_metrics , train_dataset=a , eval_dataset=a , tokenizer=a , ) # start training trainer.train()
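# Editor's note (illustration only): the labels trick inside
# _map_to_encoder_decoder_inputs above. Padding positions are replaced by -100 so
# that torch's CrossEntropyLoss (whose default ignore_index is -100) skips them:
pad_token_id = 0
labels = [[101, 7592, 102, 0, 0]]
labels = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
assert labels == [[101, 7592, 102, -100, -100]]  # loss is computed on real tokens only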
76
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
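# Editor's sketch (assumption: a simplified stand-in, not diffusers' actual deprecate
# helper, whose real signature also handles deprecated call arguments). The shim above
# only re-exports the moved classes and emits a dated warning; the warning half of
# that pattern reduces to:
import warnings


def deprecate_notice(name: str, removed_in: str, message: str) -> None:
    # stacklevel=3 attributes the warning to whoever imported the shim, as above
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removed_in}. {message}",
        FutureWarning,
        stacklevel=3,
    )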
323
0
"""simple docstring""" def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : List[str] = len(_lowerCAmelCase ) + 1 lowercase__ : Any = len(_lowerCAmelCase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. lowercase__ : List[str] = [[0 for i in range(_lowerCAmelCase )] for j in range(_lowerCAmelCase )] # since string of zero length match pattern of zero length lowercase__ : Any = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _lowerCAmelCase ): lowercase__ : Tuple = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _lowerCAmelCase ): lowercase__ : Union[str, Any] = dp[0][j - 2] if pattern[j - 1] == '*' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _lowerCAmelCase ): for j in range(1 , _lowerCAmelCase ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": lowercase__ : List[Any] = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: lowercase__ : Union[str, Any] = 1 elif pattern[j - 2] in (input_string[i - 1], "."): lowercase__ : Tuple = dp[i - 1][j] else: lowercase__ : Tuple = 0 else: lowercase__ : List[Any] = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") _UpperCamelCase : Any = "aab" _UpperCamelCase : int = "c*a*b" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f'''{input_string} matches the given pattern {pattern}''') else: print(f'''{input_string} does not match with the given pattern {pattern}''')
77
'''simple docstring'''


def set_bit(number: int, position: int):
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int):
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int):
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int):
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int):
    """simple docstring"""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
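# Editor's note (illustration only): the five operations above applied to 0b1010.
assert set_bit(0b1010, 0) == 0b1011    # turn bit 0 on
assert clear_bit(0b1010, 1) == 0b1000  # turn bit 1 off
assert flip_bit(0b1010, 3) == 0b0010   # toggle bit 3
assert is_bit_set(0b1010, 1)           # bit 1 is already on
assert get_bit(0b1010, 0) == 0         # bit 0 is off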
323
0
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa snake_case_ = logging.getLogger(__name__) class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """summarization""" __UpperCamelCase = ["""loss"""] __UpperCamelCase = ROUGE_KEYS __UpperCamelCase = """rouge2""" def __init__( self :Optional[int] , lowercase_ :Optional[int] , **lowercase_ :List[Any] ) -> Dict: if hparams.sortish_sampler and hparams.gpus > 1: UpperCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase_ , num_labels=lowercase_ , mode=self.mode , **lowercase_ ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) UpperCAmelCase = Path(self.output_dir ) / 'metrics.json' UpperCAmelCase = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) UpperCAmelCase = 0 UpperCAmelCase = defaultdict(lowercase_ ) UpperCAmelCase = self.config.model_type UpperCAmelCase = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size UpperCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } UpperCAmelCase = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } UpperCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} UpperCAmelCase = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) UpperCAmelCase = get_git_info()['repo_sha'] UpperCAmelCase = hparams.num_workers UpperCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase_ ): UpperCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] UpperCAmelCase = self.decoder_start_token_id UpperCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) 
else LegacySeqaSeqDataset ) UpperCAmelCase = False UpperCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: UpperCAmelCase = self.hparams.eval_max_gen_length else: UpperCAmelCase = self.model.config.max_length UpperCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def UpperCAmelCase__ ( self :str , lowercase_ :Dict[str, torch.Tensor] ) -> Dict[str, List[str]]: UpperCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase_ , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) UpperCAmelCase = True return readable_batch def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[Any] , **lowercase_ :int ) -> Optional[int]: return self.model(lowercase_ , **lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :List[int] ) -> Optional[int]: UpperCAmelCase = self.tokenizer.batch_decode( lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) return lmap(str.strip , lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :dict ) -> Tuple: UpperCAmelCase = self.tokenizer.pad_token_id UpperCAmelCase , UpperCAmelCase = batch['input_ids'], batch['attention_mask'] UpperCAmelCase = batch['labels'] if isinstance(self.model , lowercase_ ): UpperCAmelCase = self.model._shift_right(lowercase_ ) else: UpperCAmelCase = shift_tokens_right(lowercase_ , lowercase_ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero UpperCAmelCase = decoder_input_ids self.save_readable_batch(lowercase_ ) UpperCAmelCase = self(lowercase_ , attention_mask=lowercase_ , decoder_input_ids=lowercase_ , use_cache=lowercase_ ) UpperCAmelCase = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id UpperCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase_ ) assert lm_logits.shape[-1] == self.vocab_size UpperCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: UpperCAmelCase = nn.functional.log_softmax(lowercase_ , dim=-1 ) UpperCAmelCase , UpperCAmelCase = label_smoothed_nll_loss( lowercase_ , lowercase_ , self.hparams.label_smoothing , ignore_index=lowercase_ ) return (loss,) @property def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: return self.tokenizer.pad_token_id def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Tuple , lowercase_ :List[Any] ) -> Dict: UpperCAmelCase = self._step(lowercase_ ) UpperCAmelCase = dict(zip(self.loss_names , lowercase_ ) ) # tokens per batch UpperCAmelCase = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() UpperCAmelCase = batch['input_ids'].shape[0] UpperCAmelCase = batch['input_ids'].eq(self.pad ).sum() UpperCAmelCase = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :List[str] ) -> Dict: return self._generative_step(lowercase_ ) def UpperCAmelCase__ ( self :Tuple , lowercase_ :int , lowercase_ :str="val" ) -> Dict: self.step_count += 1 UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} UpperCAmelCase = losses['loss'] UpperCAmelCase = { k: 
np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } UpperCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) UpperCAmelCase = torch.tensor(lowercase_ ).type_as(lowercase_ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase_ ) UpperCAmelCase = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()} UpperCAmelCase = self.step_count self.metrics[prefix].append(lowercase_ ) # callback writes this to self.metrics_save_path UpperCAmelCase = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"""{prefix}_loss""": loss, f"""{prefix}_{self.val_metric}""": metric_tensor, } def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Tuple , lowercase_ :List[str] ) -> Dict: return calculate_rouge(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :dict ) -> dict: UpperCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') UpperCAmelCase = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) UpperCAmelCase = (time.time() - ta) / batch['input_ids'].shape[0] UpperCAmelCase = self.ids_to_clean_text(lowercase_ ) UpperCAmelCase = self.ids_to_clean_text(batch['labels'] ) UpperCAmelCase = self._step(lowercase_ ) UpperCAmelCase = dict(zip(self.loss_names , lowercase_ ) ) UpperCAmelCase = self.calc_generative_metrics(lowercase_ , lowercase_ ) UpperCAmelCase = np.mean(lmap(lowercase_ , lowercase_ ) ) base_metrics.update(gen_time=lowercase_ , gen_len=lowercase_ , preds=lowercase_ , target=lowercase_ , **lowercase_ ) return base_metrics def UpperCAmelCase__ ( self :Dict , lowercase_ :Optional[Any] , lowercase_ :Dict ) -> List[Any]: return self._generative_step(lowercase_ ) def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] ) -> int: return self.validation_epoch_end(lowercase_ , prefix='test' ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any ) -> SeqaSeqDataset: UpperCAmelCase = self.n_obs[type_path] UpperCAmelCase = self.target_lens[type_path] UpperCAmelCase = self.dataset_class( self.tokenizer , type_path=lowercase_ , n_obs=lowercase_ , max_target_length=lowercase_ , **self.dataset_kwargs , ) return dataset def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str , lowercase_ :int , lowercase_ :bool = False ) -> DataLoader: UpperCAmelCase = self.get_dataset(lowercase_ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": UpperCAmelCase = dataset.make_sortish_sampler(lowercase_ , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": UpperCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase_ , batch_sampler=lowercase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , ) def 
UpperCAmelCase__ ( self :Optional[int] ) -> DataLoader: UpperCAmelCase = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase_ ) return dataloader def UpperCAmelCase__ ( self :Optional[int] ) -> DataLoader: return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def UpperCAmelCase__ ( self :List[Any] ) -> DataLoader: return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def UpperCAmelCase__ ( lowercase_ :List[Any] , lowercase_ :Tuple ) -> List[Any]: BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ ) add_generic_args(lowercase_ , lowercase_ ) parser.add_argument( '--max_source_length' , default=10_24 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=56 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_42 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_42 , type=lowercase_ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase_ ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase_ ) parser.add_argument('--max_tokens_per_batch' , type=lowercase_ , default=lowercase_ ) parser.add_argument('--logger_name' , type=lowercase_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase_ , default=-1 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase_ , default=5_00 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase_ , default=-1 , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase_ , default='summarization' , required=lowercase_ , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase_ , default=0.0 , required=lowercase_ ) parser.add_argument('--src_lang' , type=lowercase_ , default='' , required=lowercase_ ) parser.add_argument('--tgt_lang' , type=lowercase_ , default='' , required=lowercase_ ) parser.add_argument('--eval_beams' , type=lowercase_ , default=lowercase_ , required=lowercase_ ) parser.add_argument( '--val_metric' , type=lowercase_ , default=lowercase_ , required=lowercase_ , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase_ , default=lowercase_ , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase_ , default=1 , required=lowercase_ , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase_ , default=-1 , required=lowercase_ , help=( '-1 means never early stop. 
early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' ) , ) return parser class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """translation""" __UpperCamelCase = ["""loss"""] __UpperCamelCase = ["""bleu"""] __UpperCamelCase = """bleu""" def __init__( self :List[str] , lowercase_ :Tuple , **lowercase_ :List[Any] ) -> Optional[int]: super().__init__(lowercase_ , **lowercase_ ) UpperCAmelCase = hparams.src_lang UpperCAmelCase = hparams.tgt_lang def UpperCAmelCase__ ( self :List[str] , lowercase_ :Dict , lowercase_ :Optional[Any] ) -> dict: return calculate_bleu(lowercase_ , lowercase_ ) def _lowerCAmelCase ( lowercase_ , lowercase_=None ): Path(args.output_dir ).mkdir(exist_ok=lowercase_ ) check_output_dir(lowercase_ , expected_items=3 ) if model is None: if "summarization" in args.task: UpperCAmelCase = SummarizationModule(lowercase_ ) else: UpperCAmelCase = TranslationModule(lowercase_ ) UpperCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): UpperCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger UpperCAmelCase = os.environ.get('WANDB_PROJECT' , lowercase_ ) UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=lowercase_ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: UpperCAmelCase = False UpperCAmelCase = args.val_metric == 'loss' UpperCAmelCase = generic_train( lowercase_ , lowercase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowercase_ ) , early_stopping_callback=lowercase_ , logger=lowercase_ , ) pickle_save(model.hparams , model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model UpperCAmelCase = '' UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=lowercase_ ) ) if checkpoints: UpperCAmelCase = checkpoints[-1] UpperCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() snake_case_ = pl.Trainer.add_argparse_args(parser) snake_case_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) snake_case_ = parser.parse_args() main(args)
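# Editor's sketch (assumption: a simplified version of the label_smoothed_nll_loss
# helper imported by the script above, ignoring the padding mask and batch-size
# normalisation for brevity). Label smoothing mixes the negative log-likelihood of
# the gold token with the mean NLL over the whole vocabulary:
import torch


def label_smoothed_nll_loss_demo(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float) -> torch.Tensor:
    # lprobs: (batch, vocab) log-probabilities; target: (batch,) gold token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.mean(dim=-1)  # uniform prior over the vocabulary
    return ((1.0 - epsilon) * nll_loss + epsilon * smooth_loss).sum()


lprobs = torch.log_softmax(torch.randn(2, 5), dim=-1)
print(label_smoothed_nll_loss_demo(lprobs, torch.tensor([1, 3]), epsilon=0.1))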
78
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
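# Editor's note (illustration only): the repeated Flax-vs-PyTorch equivalence checks
# above all reduce to an elementwise max-absolute-difference tolerance on numpy
# arrays, the same pattern as assert_almost_equals with a 4e-2 threshold:
import numpy as np


def assert_almost_equals_demo(a: np.ndarray, b: np.ndarray, tol: float) -> None:
    diff = np.max(np.abs(a - b))
    assert diff < tol, f"Max diff is {diff}"


assert_almost_equals_demo(np.array([1.0, 2.0]), np.array([1.0, 2.01]), 4e-2)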
323
0
'''simple docstring'''
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = '''Speech2TextFeatureExtractor'''
    tokenizer_class = '''Speech2TextTokenizer'''

    def __init__(self, feature_extractor, tokenizer):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
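# Editor's sketch (assumptions: transformers, torch and numpy installed; the
# checkpoint name is illustrative and from_pretrained downloads weights). Typical
# use of the processor above: synthetic 16 kHz audio goes through the feature
# extractor, yielding log-mel features for the model.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000))  # 1 s of a 440 Hz tone
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # (1, frames, 80) log-mel filterbank features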
79
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim SCREAMING_SNAKE_CASE : Tuple = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : str = ( nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : Any = GPTaConfig( vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ): '''simple 
docstring''' return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.encode_prefix(lowerCamelCase_ ) @torch.no_grad() def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 ) SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : Tuple = [] for feature in features: SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature # Only support beam search for now SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam( input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = eos_token_id SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool ) if input_embeds is not None: SCREAMING_SNAKE_CASE : Dict = input_embeds else: SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ ) for i in range(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = outputs.logits SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log() if scores is None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: SCREAMING_SNAKE_CASE : List[Any] = next_tokens else: SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] ) SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 ) else: SCREAMING_SNAKE_CASE : Tuple = -float(np.inf ) SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits seq_lengths[~is_stopped] += 1 SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1] SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source] SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1] SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 ) SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source] SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) 
, dim=1 ) SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source] SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source] SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 ) SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze() if is_stopped.all(): break SCREAMING_SNAKE_CASE : int = scores / seq_lengths SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ ) # tokens tensors are already padded to max_seq_length SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order] SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
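The decoder above conditions GPT-2 on an encoded feature "prefix" by projecting it into pseudo-token embeddings and concatenating those in front of the text embeddings. A minimal sketch of that idea, written with the standard transformers GPT-2 classes and illustrative dimensions rather than the values used above:

import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel

prefix_length, feature_dim, n_embd = 10, 512, 64
encode_prefix = nn.Linear(feature_dim, prefix_length * n_embd)
model = GPT2LMHeadModel(GPT2Config(n_embd=n_embd, n_layer=2, n_head=2))

feature = torch.randn(1, feature_dim)                 # e.g. a CLIP embedding
prefix = encode_prefix(feature).view(1, prefix_length, n_embd)
token_ids = torch.tensor([[464, 3290]])               # arbitrary token ids
embeds = torch.cat([prefix, model.transformer.wte(token_ids)], dim=1)
logits = model(inputs_embeds=embeds).logits           # (1, prefix_length + 2, vocab_size)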
323
0
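One detail worth isolating from the decoder above: generate_beam ranks candidates by cumulative log-probability divided by per-beam sequence length, so longer beams are not penalized for accumulating more terms. The same arithmetic on made-up numbers:

import torch

scores = torch.tensor([[-1.2], [-3.1]])               # cumulative log-prob per beam
logits = torch.tensor([[-0.1, -2.0], [-0.5, -0.7]])   # next-step log-probs per token
seq_lengths = torch.tensor([3.0, 5.0])

scores_sum = scores + logits                          # (beams, vocab) candidate grid
average = scores_sum / seq_lengths[:, None]           # length-normalized scores
best = average.view(-1).topk(2)
print(best.values, best.indices)                      # flat index = beam * vocab + token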
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer a__ : int = logging.get_logger(__name__) a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : List[Any] = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } a__ : Optional[Any] = { 'junnyu/roformer_chinese_small': 1_5_3_6, 'junnyu/roformer_chinese_base': 1_5_3_6, 'junnyu/roformer_chinese_char_small': 5_1_2, 'junnyu/roformer_chinese_char_base': 5_1_2, 'junnyu/roformer_small_discriminator': 1_2_8, 'junnyu/roformer_small_generator': 1_2_8, } a__ : str = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class lowercase_ ( a__ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase = RoFormerTokenizer def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ): super().__init__( a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , ) UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("lowercase" , a ) != do_lower_case or pre_tok_state.get("strip_accents" , a ) != strip_accents ): UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) ) UpperCamelCase__ = do_lower_case UpperCamelCase__ = strip_accents UpperCamelCase__ = pre_tok_class(**a ) UpperCamelCase__ = do_lower_case def __getstate__( self ): UpperCamelCase__ = self.__dict__.copy() UpperCamelCase__ = BertPreTokenizer() return state def __setstate__( self , a ): UpperCamelCase__ = d UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab() UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) ) def __a ( self , a , a=None ): UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , a , a = None ): UpperCamelCase__ = [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , a , a = None ): UpperCamelCase__ = self._tokenizer.model.save(a , name=a ) return tuple(a ) def __a ( self , a , a=None , a=None , a=False , **a , ): UpperCamelCase__ = BertPreTokenizer() return super().save_pretrained(a , a , a , a , **a )
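A pure-Python restatement of the special-token layout the fast tokenizer above builds, [CLS] A [SEP] (+ B [SEP]) with segment ids 0 for the first block and 1 for the second; the ids here are illustrative BERT-style values, not RoFormer's actual vocabulary:

CLS, SEP = 101, 102  # illustrative special-token ids

def build_inputs(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out

def token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)                    # cls + A + sep
    return first if ids_b is None else first + [1] * (len(ids_b) + 1)

assert len(build_inputs([7, 8], [9])) == len(token_type_ids([7, 8], [9])) == 6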
80
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git_vision_model''' def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : str = attention_dropout SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : List[str] = hidden_act @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ): '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git''' def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ ) if vision_config is None: SCREAMING_SNAKE_CASE : Any = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : Tuple = hidden_size SCREAMING_SNAKE_CASE : int = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : Dict = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings SCREAMING_SNAKE_CASE : int = num_image_with_embedding SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id SCREAMING_SNAKE_CASE : str = eos_token_id def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
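The composite config above nests a vision sub-config and re-serializes it in to_dict. A hedged usage sketch, assuming a transformers release that ships GitConfig and that the dict keys match the implementation shown:

from transformers import GitConfig

config = GitConfig(num_hidden_layers=2)               # vision_config filled with defaults
d = config.to_dict()
print(d["model_type"])                                # expected: 'git'
print(d["vision_config"]["model_type"])               # expected: 'git_vision_model'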
323
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "naver-clova-ix/donut-base-finetuned-docvqa" __lowerCAmelCase = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) __lowerCAmelCase = "document_qa" __lowerCAmelCase = AutoProcessor __lowerCAmelCase = VisionEncoderDecoderModel __lowerCAmelCase = ["image", "text"] __lowerCAmelCase = ["text"] def __init__( self , *__A , **__A ) -> Tuple: if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> List[str]: a ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' a =task_prompt.replace('''{user_input}''' , __A ) a =self.pre_processor.tokenizer( __A , add_special_tokens=__A , return_tensors='''pt''' ).input_ids a =self.pre_processor(__A , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def SCREAMING_SNAKE_CASE ( self , __A ) -> int: return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]: a =self.pre_processor.batch_decode(__A )[0] a =sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) a =sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) a =re.sub(r'''<.*?>''' , '''''' , __A , count=1 ).strip() # remove first task start token a =self.pre_processor.tokenajson(__A ) return sequence["answer"]
81
'''simple docstring''' from manim import * class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 ) SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i, rect in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 ) target.move_to(lowerCamelCase_ ) model_arr.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCamelCase_ ) self.add(*lowerCamelCase_ , *lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) disk.move_to([-4, -1.25, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = 
MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 ) input.set_fill(lowerCamelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 ) self.play(Write(lowerCamelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 ) self.play(MoveToTarget(lowerCamelCase_ ) ) self.play(FadeOut(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02} self.play( Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) SCREAMING_SNAKE_CASE : Optional[int] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) SCREAMING_SNAKE_CASE : Any = AnimationGroup( FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCamelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: SCREAMING_SNAKE_CASE : Optional[Any] = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) SCREAMING_SNAKE_CASE : 
Union[str, Any] = a_c SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , ) SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) ) self.wait()
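A minimal standalone scene in the same Manim idiom as the animation above (assumes the manim package is installed; render with e.g. manim -ql file.py MemoryRow):

from manim import DOWN, RIGHT, Rectangle, Scene, Text, VGroup

class MemoryRow(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        row = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24).next_to(row, DOWN)
        self.add(row, label)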
323
0
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class __lowerCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1.0 , _snake_case = None , ): """simple docstring""" super().__init__() _lowerCAmelCase = initial_learning_rate _lowerCAmelCase = warmup_steps _lowerCAmelCase = power _lowerCAmelCase = decay_schedule_fn _lowerCAmelCase = name def __call__( self , _snake_case ): """simple docstring""" with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. _lowerCAmelCase = tf.cast(_snake_case , tf.floataa ) _lowerCAmelCase = tf.cast(self.warmup_steps , tf.floataa ) _lowerCAmelCase = global_step_float / warmup_steps_float _lowerCAmelCase = self.initial_learning_rate * tf.math.pow(_snake_case , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_snake_case , ) def snake_case ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case = 0.0 , snake_case = 0.9 , snake_case = 0.999 , snake_case = 1E-8 , snake_case = None , snake_case = None , snake_case = 0.0 , snake_case = 1.0 , snake_case = None , ): """simple docstring""" _lowerCAmelCase = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=snake_case , ) if num_warmup_steps: _lowerCAmelCase = WarmUp( initial_learning_rate=snake_case , decay_schedule_fn=snake_case , warmup_steps=snake_case , ) if weight_decay_rate > 0.0: _lowerCAmelCase = AdamWeightDecay( learning_rate=snake_case , weight_decay_rate=snake_case , beta_a=snake_case , beta_a=snake_case , epsilon=snake_case , clipnorm=snake_case , global_clipnorm=snake_case , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=snake_case , ) else: _lowerCAmelCase = tf.keras.optimizers.Adam( learning_rate=snake_case , beta_a=snake_case , beta_a=snake_case , epsilon=snake_case , clipnorm=snake_case , global_clipnorm=snake_case , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class __lowerCAmelCase ( lowerCamelCase__ ): def __init__( self , _snake_case = 0.001 , _snake_case = 0.9 , _snake_case = 0.999 , _snake_case = 1e-7 , _snake_case = False , _snake_case = 0.0 , _snake_case = None , _snake_case = None , _snake_case = "AdamWeightDecay" , **_snake_case , ): """simple docstring""" super().__init__(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case ) _lowerCAmelCase = weight_decay_rate _lowerCAmelCase = include_in_weight_decay _lowerCAmelCase = exclude_from_weight_decay @classmethod def snake_case ( cls , _snake_case ): """simple docstring""" _lowerCAmelCase = {"""WarmUp""": WarmUp} return super(_snake_case , cls ).from_config(_snake_case , custom_objects=_snake_case ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" super(_snake_case , self )._prepare_local(_snake_case , _snake_case , _snake_case ) _lowerCAmelCase = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def snake_case ( self , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = list(zip(*_snake_case ) ) return super(_snake_case , self ).apply_gradients(zip(_snake_case , _snake_case ) , name=_snake_case , **_snake_case ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} _lowerCAmelCase = apply_state or {} _lowerCAmelCase = apply_state.get((var_device, var_dtype) ) if coefficients is None: _lowerCAmelCase = self._fallback_apply_state(_snake_case , _snake_case ) _lowerCAmelCase = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def snake_case ( self , _snake_case , _snake_case , _snake_case=None ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , _snake_case ) _lowerCAmelCase = self._decay_weights_op(_snake_case , _snake_case , _snake_case ) with tf.control_dependencies([decay] ): return super(_snake_case , self )._resource_apply_dense(_snake_case , _snake_case , **_snake_case ) def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case=None ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , _snake_case ) _lowerCAmelCase = self._decay_weights_op(_snake_case , _snake_case , _snake_case ) with tf.control_dependencies([decay] ): return super(_snake_case , self )._resource_apply_sparse(_snake_case , _snake_case , _snake_case , **_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def snake_case ( self , _snake_case ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(_snake_case , _snake_case ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(_snake_case , _snake_case ) is not None: return False return True class __lowerCAmelCase ( 
lowerCamelCase__ ): def __init__( self ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = None @property def snake_case ( self ): """simple docstring""" if self._accum_steps is None: _lowerCAmelCase = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=_snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def snake_case ( self ): """simple docstring""" if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , _snake_case ): """simple docstring""" if not self._gradients: _lowerCAmelCase = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(_snake_case ) , trainable=_snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(_snake_case ) != len(self._gradients ): raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(_snake_case )}' ) for accum_gradient, gradient in zip(self._gradients , _snake_case ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(_snake_case ) self._accum_steps.assign_add(1 ) def snake_case ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(_snake_case ) )
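The WarmUp wrapper above ramps the learning rate polynomially over warmup_steps and then defers to the decay schedule. The same arithmetic in plain Python, with illustrative hyperparameters and a linear stand-in for the decay:

def lr_at(step, init_lr=1e-4, warmup_steps=100, total_steps=1000, power=1.0):
    if step < warmup_steps:                           # polynomial warmup
        return init_lr * (step / warmup_steps) ** power
    frac = (step - warmup_steps) / (total_steps - warmup_steps)
    return init_lr * (1.0 - frac)                     # decay to zero afterwards

for s in (0, 50, 100, 550, 1000):
    print(s, lr_at(s))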
82
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = graph # mapping node to its parent in resulting breadth first tree SCREAMING_SNAKE_CASE : dict[str, str | None] = {} SCREAMING_SNAKE_CASE : List[str] = source_vertex def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex} SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue while queue: SCREAMING_SNAKE_CASE : str = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = vertex queue.append(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ ) if target_vertex_parent is None: SCREAMING_SNAKE_CASE : Tuple = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCamelCase_ ) return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}''' if __name__ == "__main__": __UpperCAmelCase = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
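shortest_path above rebuilds the route by following parent pointers recursively back to the source; the same idea iteratively, on a toy breadth-first tree:

parent = {"A": None, "B": "A", "D": "B"}              # toy BFS parent map

def path_to(vertex):
    path = []
    while vertex is not None:
        path.append(vertex)
        vertex = parent[vertex]
    return path[::-1]

print(path_to("D"))                                   # ['A', 'B', 'D']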
323
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer snake_case_ : Union[str, Any] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast snake_case_ : List[Any] = TaTokenizerFast snake_case_ : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[Any] = [ 'MT5EncoderModel', 'MT5ForConditionalGeneration', 'MT5ForQuestionAnswering', 'MT5Model', 'MT5PreTrainedModel', 'MT5Stack', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : int = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : int = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model'] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys snake_case_ : Dict = _LazyModule( __name__, globals()['__file__'], _import_structure, extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast}, module_spec=__spec__, )
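The __init__ above routes everything through _LazyModule so heavy submodules import only on first attribute access. A toy stand-in for the mechanism (the stdlib json module is used purely as a harmless demo):

import importlib

class LazyModule:
    def __init__(self, import_structure):
        # invert {module: [names]} into {name: module}
        self._origin = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        module = importlib.import_module(self._origin[name])
        return getattr(module, name)

lazy = LazyModule({"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))                       # json is imported only here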
83
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = pos_x SCREAMING_SNAKE_CASE : Any = pos_y SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Tuple = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowerCamelCase_ ) self.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCamelCase_ ) else: self.open_nodes.append(lowerCamelCase_ ) return [self.start.pos] def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) ) return successors def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = node SCREAMING_SNAKE_CASE : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) self.fwd_astar.closed_nodes.append(lowerCamelCase_ ) self.bwd_astar.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node SCREAMING_SNAKE_CASE : Any = current_fwd_node SCREAMING_SNAKE_CASE : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop( astar.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCamelCase_ ) else: astar.open_nodes.append(lowerCamelCase_ ) return [self.fwd_astar.start.pos] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
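Both searches above order open nodes by f = g + h, where h switches between Manhattan and Euclidean distance via the HEURISTIC flag. The cost bookkeeping in isolation:

from math import sqrt

def heuristic(pos, goal, manhattan=True):
    dy, dx = pos[0] - goal[0], pos[1] - goal[1]
    return abs(dx) + abs(dy) if manhattan else sqrt(dx * dx + dy * dy)

g_cost = 3                                            # path cost so far
h_cost = heuristic((2, 1), (5, 5))                    # estimate to the goal
f_cost = g_cost + h_cost                              # open nodes pop in ascending f
print(f_cost)                                         # 10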
323
0
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self , __A ) -> int: raise NotImplementedError() def __lowerCAmelCase ( self ) -> Optional[Any]: raise NotImplementedError() class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A = False , **__A ) -> int: lowerCAmelCase_ :List[str] = tokenizer lowerCAmelCase_ :Dict = skip_prompt lowerCAmelCase_ :Union[str, Any] = decode_kwargs # variables used in the streaming process lowerCAmelCase_ :Any = [] lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :List[Any] = True def __lowerCAmelCase ( self , __A ) -> str: if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: lowerCAmelCase_ :Tuple = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowerCAmelCase_ :int = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowerCAmelCase_ :int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): lowerCAmelCase_ :List[str] = text[self.print_len :] lowerCAmelCase_ :Tuple = [] lowerCAmelCase_ :Tuple = 0 # If the last token is a CJK character, we print the characters. elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowerCAmelCase_ :Union[str, Any] = text[self.print_len :] self.print_len += len(__A ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: lowerCAmelCase_ :Tuple = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(__A ) self.on_finalized_text(__A ) def __lowerCAmelCase ( self ) -> Dict: # Flush the cache, if it exists if len(self.token_cache ) > 0: lowerCAmelCase_ :Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) lowerCAmelCase_ :List[str] = text[self.print_len :] lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :int = 0 else: lowerCAmelCase_ :Tuple = """""" lowerCAmelCase_ :int = True self.on_finalized_text(__A , stream_end=__A ) def __lowerCAmelCase ( self , __A , __A = False ) -> Optional[Any]: print(__A , flush=__A , end="""""" if not stream_end else None ) def __lowerCAmelCase ( self , __A ) -> Dict: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E_00 and cp <= 0x9F_FF) or (cp >= 0x34_00 and cp <= 0x4D_BF) # or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) # or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) # or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) # or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) # or (cp >= 0xF9_00 and cp <= 0xFA_FF) or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) # ): # return True return False class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A = False , __A = None , **__A ) -> Dict: super().__init__(__A , __A , **__A ) lowerCAmelCase_ :Union[str, Any] = Queue() lowerCAmelCase_ :Any = None lowerCAmelCase_ :List[Any] = timeout def __lowerCAmelCase ( self , __A , __A = False ) -> List[str]: self.text_queue.put(__A , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self ) -> Optional[Any]: return self def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :int = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
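The streamer above avoids printing half-finished words by tracking how much of the running decode has already been emitted, and flushes everything on a newline. A toy version of just that bookkeeping (the decoding step itself is elided):

print_len = 0

def flush(text):
    """Return the newly printable slice of `text`, holding back partial words."""
    global print_len
    if text.endswith("\n"):
        printable, print_len = text[print_len:], 0    # full flush, cache resets
    else:
        printable = text[print_len : text.rfind(" ") + 1]
        print_len += len(printable)
    return printable

print(repr(flush("Hello wo")))                        # 'Hello ' - waits for the word
print(repr(flush("Hello world\n")))                   # 'world\n'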
84
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''efficientnet''' def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : int = width_coefficient SCREAMING_SNAKE_CASE : List[str] = depth_coefficient SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor SCREAMING_SNAKE_CASE : List[str] = kernel_sizes SCREAMING_SNAKE_CASE : Dict = in_channels SCREAMING_SNAKE_CASE : List[str] = out_channels SCREAMING_SNAKE_CASE : Any = depthwise_padding SCREAMING_SNAKE_CASE : Dict = strides SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats SCREAMING_SNAKE_CASE : Any = expand_ratios SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dim SCREAMING_SNAKE_CASE : List[str] = pooling_type SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Any = batch_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum SCREAMING_SNAKE_CASE : Dict = dropout_rate SCREAMING_SNAKE_CASE : int = drop_connect_rate SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4 class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return 1e-5
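The channel lists in the config above follow EfficientNet's compound scaling, where base widths are multiplied by width_coefficient and rounded to a multiple of depth_divisor. A sketch of the standard rounding rule (the actual helper lives in the modeling code, not in this config class):

def round_filters(filters, width_coefficient=2.0, divisor=8):
    filters *= width_coefficient
    rounded = max(divisor, int(filters + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * filters:                       # never round down by more than 10%
        rounded += divisor
    return int(rounded)

print([round_filters(c) for c in (32, 16, 24, 40)])   # [64, 32, 48, 80]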
323
0
'''simple docstring'''


def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of input_string (Manacher's algorithm)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # l and r store the start and end of the previous furthest-ending palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end r?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and the start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # build the answer, dropping the "|" separators
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
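A brute-force O(n^3) oracle is handy for cross-checking the Manacher implementation above on short strings:

def longest_palindrome_naive(s):
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

assert longest_palindrome_naive("abacdfgdcaba") == "aba"
assert longest_palindrome_naive("forgeeksskeegfor") == "geeksskeeg"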
85
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
323
0
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Optional[int] = out_indices if out_indices is not None else [4] __lowerCAmelCase : Optional[int] = stage_names __lowerCAmelCase : Optional[Any] = out_features __lowerCAmelCase : Any = backbone __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : List[str] = image_size __lowerCAmelCase : str = num_channels __lowerCAmelCase : int = use_pretrained_backbone __lowerCAmelCase : Optional[Any] = is_training def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : str = self.get_config() return config, pixel_values def __lowerCamelCase ( self ): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = TimmBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = config_and_inputs __lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : Tuple = (TimmBackbone,) if is_torch_available() else () A_ : Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : Optional[int] = False A_ : List[str] = False A_ : str = False A_ : Dict = False def __lowerCamelCase ( self ): __lowerCAmelCase : int = TimmBackboneModelTester(self ) __lowerCAmelCase : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def 
__lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = 'resnet18' __lowerCAmelCase : Optional[int] = 'microsoft/resnet-18' __lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowerCAmelCase : int = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) __lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Safetensors is not supported by timm.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : List[str] = [*signature.parameters.keys()] __lowerCAmelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Any = True __lowerCAmelCase : Optional[Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowerCAmelCase : Any = self.all_model_classes[0] __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = outputs[0][-1] # Encoder-/Decoder-only models __lowerCAmelCase : Dict = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowerCAmelCase : Tuple = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowerCAmelCase : Optional[int] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = None __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : int = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowerCAmelCase : List[Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = False __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE )
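A short sketch of the AutoBackbone equivalence these tests check, reusing the two checkpoint names that appear in the test body; treat it as illustrative rather than part of the test suite:

from transformers import AutoBackbone

# Same ResNet-18 loaded through timm and through transformers; the test above
# asserts matching channels, stage names and (once set explicitly) out_indices.
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
assert timm_backbone.channels == hf_backbone.channels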
86
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
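The two full-loop tests above follow the standard consistency-model sampling pattern: scale the input, predict the residual, then step. A condensed sketch of that loop outside the test harness; `model` is a stand-in for any network with a (sample, timestep) call signature and is not defined here:

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)  # 1. scale model input
    model_output = model(scaled, t)                  # 2. predict noise residual (stand-in)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample  # 3. previous sample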
323
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase = logging.getLogger(__name__) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int): return (preds == labels).mean() @dataclass class snake_case_ : __A : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) @dataclass class snake_case_ : __A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) __A : str = field(metadata={"help": "Should contain the data files for the task."} ) __A : int = field( default=128 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) __A : bool = field( default=__A ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowercase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' " --overwrite_output_dir to overcome.") # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , _lowerCamelCase) # Set seed set_seed(training_args.seed) try: lowercase__ : Optional[int] = processors[data_args.task_name]() lowercase__ : Union[str, Any] = processor.get_labels() lowercase__ : Union[str, Any] = len(_lowerCamelCase) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase__ : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase__ : Optional[int] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets lowercase__ : List[str] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase__ : Any = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_lowerCamelCase : EvalPrediction) -> Dict: lowercase__ : int = np.argmax(p.predictions , axis=1) return {"acc": simple_accuracy(_lowerCamelCase , p.label_ids)} # Data collator lowercase__ : Dict = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8) if training_args.fpaa else None # Initialize our Trainer lowercase__ : int = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , data_collator=_lowerCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation lowercase__ : List[str] = {} if training_args.do_eval: logger.info("*** 
Evaluate ***") lowercase__ : Any = trainer.evaluate() lowercase__ : List[str] = os.path.join(training_args.output_dir , "eval_results.txt") if trainer.is_world_master(): with open(_lowerCamelCase , "w") as writer: logger.info("***** Eval results *****") for key, value in result.items(): logger.info(" %s = %s" , _lowerCamelCase , _lowerCamelCase) writer.write("%s = %s\n" % (key, value)) results.update(_lowerCamelCase) return results def lowercase_ ( _lowerCamelCase : List[Any]): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
87
"""Dataset reader that loads plain-text files via the packaged Text builder."""
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalise a bare path (or list of paths) into a {split: paths} mapping
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=self.path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
323
0
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on the WGS84 ellipsoid, via Lambert's formula."""
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
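A rough sanity check for the function above; because of the relative import, this assumes the module sits in a package next to haversine_distance. Coordinates are San Francisco and New York City in decimal degrees:

SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)

distance = lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK)
print(f"{distance / 1000:.0f} km")  # on the order of 4,100 km; slightly different from the
# pure haversine value, since Lambert's formula corrects for the Earth's flattening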
88
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
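After conversion the checkpoint can be loaded back the usual way. A sketch assuming the script above has already pushed openmmlab/upernet-swin-tiny to the Hub (that is the id its push_to_hub branch constructs); the processor and test image mirror the script's own verification step:

import requests
from PIL import Image
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
logits = model(pixel_values).logits  # (1, 150, H, W) -- one channel per ADE20K class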
323
0
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left to the bottom-right corner of
    ``grid`` (0 = free cell, 1 = blocked), moving up/down/left/right without
    revisiting a cell, via backtracking depth-first search."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
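A quick check of the search above: on a 3x3 grid whose centre cell is blocked, the free cells form a ring, so there are exactly two simple paths from (0, 0) to (2, 2):

grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(grid, 0, 0, set()))  # 2 -- clockwise and counter-clockwise around the ring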
89
"""In-graph GPT-2 tokenizer implemented as a Keras layer."""
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
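A minimal usage sketch for the TFGPT2Tokenizer layer above; it assumes tensorflow, tensorflow-text and keras-nlp are installed. Because tokenization happens in-graph, the layer can travel inside a SavedModel export:

import tensorflow as tf

# Builds the layer from the regular GPT-2 vocab and merges.
tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["hello world"]))
print(outputs["input_ids"], outputs["attention_mask"])  # per the call() contract above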
323
0
import os import sys import unittest __A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __A = os.path.join("tests", "models", "bert", "test_modeling_bert.py") __A = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = {'BertModelTest': 'BertModelTester'} __lowerCamelCase = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ ) __lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ ) __lowerCamelCase = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } __lowerCamelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } __lowerCamelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
90
"""Abstract base classes for dataset readers and input streams."""
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # Default to "train" unless a split is given or paths already map splits
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
323
0
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
91
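A classic check for the Jaro-Winkler implementation above ("martha" vs "marhta"): six matching characters, one transposition, and a shared three-letter prefix:

print(jaro_winkler("martha", "marhta"))  # 0.9611111111111111
# jaro = (6/6 + 6/6 + (6 - 1)/6) / 3 = 17/18; jw = 17/18 + 0.1 * 3 * (1 - 17/18)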
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
0
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the idealised Casimir equation F = (ℏ c π² A) / (240 d⁴) for whichever
    of force, area or distance is passed in as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
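A worked call for the solver above: leaving force as the unknown (0), a 4 m² plate pair separated by 3 cm gives roughly 6.4e-21 N:

print(casimir_force(force=0, area=4, distance=0.03))
# {'force': 6.4248...e-21}
# F = hbar * c * pi^2 * A / (240 * d^4)
#   = (1.0546e-34 * 3e8 * 9.8696 * 4) / (240 * (0.03) ** 4)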
92
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
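# A minimal, self-contained check of the accuracy metric wired into the Trainer
# script above: compute_metrics argmaxes the per-choice logits, then the
# simple-accuracy helper (obfuscated as `__A` in the snippet) is just the mean
# of elementwise equality. The logits below are made up for illustration.
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
label_ids = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)            # -> array([1, 0, 1])
print({"acc": (preds == label_ids).mean()})  # {'acc': 0.666...}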
323
0
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = RobertaTokenizer lowerCAmelCase_ = RobertaTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = {'''cls_token''': '''<s>'''} def _snake_case ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowercase_ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) lowercase_ : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowercase_ : Union[str, Any] = {'''unk_token''': '''<unk>'''} lowercase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : List[Any] = '''lower newer''' lowercase_ : Dict = '''lower newer''' return input_text, output_text def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase_ : List[Any] = '''lower newer''' lowercase_ : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowercase_ : Union[str, Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokens + [tokenizer.unk_token] lowercase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" lowercase_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _snake_case ( self ): """simple docstring""" lowercase_ : List[Any] = self.tokenizer_class.from_pretrained('''roberta-base''' ) lowercase_ : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.encode( '''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case ( self ): """simple docstring""" lowercase_ : Tuple = self.get_tokenizer() lowercase_ : Optional[Any] = '''Encode this sequence.''' lowercase_ : Optional[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments lowercase_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Testing spaces after special tokens lowercase_ : Any = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space lowercase_ : List[Any] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) lowercase_ : int = '''Encode <mask> sequence''' lowercase_ : List[Any] = '''Encode <mask>sequence''' lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = encoded.index(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = encoded.index(__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" pass def _snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : 
Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : str = '''A, <mask> AllenNLP sentence.''' lowercase_ : Optional[int] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowercase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowercase_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _snake_case ( self ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowercase_ : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` lowercase_ : int = F'''{text_of_1_token} {text_of_1_token}''' lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) 
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : List[str] = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase_ : str = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Any = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , 
use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
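# A sketch of the offset-mapping behavior exercised by the test above, runnable
# against the public roberta-base checkpoint (downloads on first use). With
# trim_offsets=True the leading space is excluded from the second token's
# character span; the printed offsets below are hand-derived from the asserts
# in the test, not independently verified.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # expected [(0, 5), (6, 11)] -- the space at index 5 is trimmed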
93
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
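# Standalone sketch of the codebook lookup plus straight-through estimator used
# by the vector quantizer above; all shapes, sizes, and variable names here are
# illustrative, not taken from the class itself.
import torch

codebook = torch.nn.Embedding(16, 4)           # n_e=16 codes of dim vq_embed_dim=4
z = torch.randn(2, 4, 8, 8)                    # (batch, channels, height, width)
z_flat = z.permute(0, 2, 3, 1).reshape(-1, 4)  # flatten spatial dims: (N, 4)
idx = torch.argmin(torch.cdist(z_flat, codebook.weight), dim=1)  # nearest code per vector
z_q = codebook(idx).view(2, 8, 8, 4).permute(0, 3, 1, 2)
z_st = z + (z_q - z).detach()  # values come from the codebook, gradients flow through z
print(z_st.shape)              # torch.Size([2, 4, 8, 8])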
323
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : List[Any] = logging.get_logger(__name__) # TODO Update this snake_case : Union[str, Any] = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'esm' def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1026 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(pad_token_id=_lowerCamelCase , mask_token_id=_lowerCamelCase , **_lowerCamelCase ) a :Tuple = vocab_size a :List[str] = hidden_size a :int = num_hidden_layers a :int = num_attention_heads a :Union[str, Any] = intermediate_size a :Union[str, Any] = hidden_dropout_prob a :Any = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :str = initializer_range a :Tuple = layer_norm_eps a :Union[str, Any] = position_embedding_type a :List[str] = use_cache a :str = emb_layer_norm_before a :List[str] = token_dropout a :str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) a :Optional[Any] = EsmFoldConfig() elif isinstance(_lowerCamelCase , _lowerCamelCase ): a :Dict = EsmFoldConfig(**_lowerCamelCase ) a :Optional[Any] = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) a :str = get_default_vocab_list() else: a :Dict = vocab_list else: a :Tuple = None a :List[str] = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _lowerCamelCase ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = super().to_dict() if isinstance(self.esmfold_config , _lowerCamelCase ): a :Optional[Any] = self.esmfold_config.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = None def SCREAMING_SNAKE_CASE__ ( self ): if self.trunk is None: a :List[str] = TrunkConfig() elif isinstance(self.trunk , _lowerCamelCase ): a :List[Any] = TrunkConfig(**self.trunk ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = asdict(self ) a :Any = self.trunk.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = 48 SCREAMING_SNAKE_CASE__ = 1024 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = None def SCREAMING_SNAKE_CASE__ ( self ): if self.structure_module is None: a :List[Any] = 
StructureModuleConfig() elif isinstance(self.structure_module , _lowerCamelCase ): a :Optional[int] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) a :Tuple = self.sequence_state_dim // self.sequence_head_width a :Union[str, Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = asdict(self ) a :Dict = self.structure_module.to_dict() return output @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = 384 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 16 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 12 SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 8 SCREAMING_SNAKE_CASE__ = 0.1 SCREAMING_SNAKE_CASE__ = 8 SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 7 SCREAMING_SNAKE_CASE__ = 10 SCREAMING_SNAKE_CASE__ = 1e-8 SCREAMING_SNAKE_CASE__ = 1e5 def SCREAMING_SNAKE_CASE__ ( self ): return asdict(self ) def __lowerCamelCase ( ): """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
94
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class UpperCamelCase__(FlaxModelTesterMixin, unittest.TestCase):
    SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def lowerCamelCase_(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
323
0
from __future__ import annotations

from math import pow, sqrt


def _A(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given two of resistance, reactance and impedance of a series AC circuit
    (pass 0 for the unknown one), solve for the missing quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
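# Quick hand-verified checks of `_A` above on a 3-4-5 triangle of R, X, |Z|.
print(_A(resistance=0, reactance=4.0, impedance=5.0))  # {'resistance': 3.0}
print(_A(resistance=3.0, reactance=0, impedance=5.0))  # {'reactance': 4.0}
print(_A(resistance=3.0, reactance=4.0, impedance=0))  # {'impedance': 5.0}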
95
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' pass def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ ) import datasets SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) SCREAMING_SNAKE_CASE : Any = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , lowerCamelCase_ , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large""" SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] ) # This seems flaky. 
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
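# Minimal end-to-end use of the depth-estimation pipeline exercised by the slow
# test above. The checkpoint name is taken from the test itself; running this
# downloads the model. The output keys match what the test asserts.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # a torch.Tensor depth map
outputs["depth"].save("depth.png")       # a PIL.Image visualization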
323
0
"""simple docstring""" def _snake_case ( lowercase__ ): assert column_title.isupper() _lowerCamelCase : List[str] = 0 _lowerCamelCase : Any = len(lowercase__ ) - 1 _lowerCamelCase : Optional[Any] = 0 while index >= 0: _lowerCamelCase : Union[str, Any] = (ord(column_title[index] ) - 64) * pow(26 , lowercase__ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
96
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
323
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :Optional[int] = np.random.default_rng(UpperCamelCase_ ) UpperCamelCase__ :Tuple = length UpperCamelCase__ :Optional[int] = rng.normal(size=(length,) ).astype(np.floataa ) UpperCamelCase__ :str = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ): '''simple docstring''' return self.length def __getitem__( self , UpperCamelCase_ ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class lowercase ( torch.nn.Module ): """simple docstring""" def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ): '''simple docstring''' super().__init__() UpperCamelCase__ :Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCamelCase__ :Tuple = True def lowerCAmelCase__ ( self , UpperCamelCase_=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) UpperCamelCase__ :Tuple = False return x * self.a[0] + self.b[0] class lowercase ( torch.nn.Module ): """simple docstring""" def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ): '''simple docstring''' super().__init__() UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() ) UpperCamelCase__ :List[Any] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() ) UpperCamelCase__ :str = True def lowerCAmelCase__ ( self , UpperCamelCase_=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) UpperCamelCase__ :Optional[int] = False return x * self.a + self.b def a ( __a , __a = 16 ) -> Union[str, Any]: '''simple docstring''' from datasets import load_dataset from transformers import AutoTokenizer UpperCamelCase__ :Any = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCamelCase__ :Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} UpperCamelCase__ :Dict = load_dataset('''csv''' , data_files=__a ) UpperCamelCase__ :int = datasets['''train'''].unique('''label''' ) UpperCamelCase__ :List[str] = {v: i for i, v in enumerate(__a )} def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase__ :Any = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a , padding='''max_length''' ) if "label" in examples: UpperCamelCase__ :str = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCamelCase__ :str = datasets.map( __a , batched=__a , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
UpperCamelCase__ :int = DataLoader(tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=2 ) UpperCamelCase__ :Dict = DataLoader(tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=1 ) return train_dataloader, eval_dataloader
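# Why the collate_fn above branches on DistributedType.TPU: XLA recompiles for
# every new tensor shape, so fixed max_length padding keeps shapes static, while
# GPUs tolerate "longest"-style dynamic padding. A minimal sketch of that choice
# (the helper name below is invented for illustration):
def pick_padding_kwargs(on_tpu: bool) -> dict:
    if on_tpu:
        return {"padding": "max_length", "max_length": 128}  # static shapes for XLA
    return {"padding": "longest"}  # cheaper dynamic shapes elsewhere

print(pick_padding_kwargs(True))
print(pick_padding_kwargs(False))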
97
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
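# A stripped-down sketch of the lazy-import pattern behind `_LazyModule` above.
# This is illustrative only, not the actual transformers implementation:
# attribute access imports the owning submodule on first use, then caches it.
import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value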
323
0
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ : str = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Any = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import"
    " `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
323
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase : Any = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys lowercase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` of `number` as 0 or 1."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
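# Worked example for the helpers above (values chosen purely for illustration):
if __name__ == "__main__":
    assert set_bit(0b1001, 1) == 0b1011    # 9  -> 11: bit 1 turned on
    assert clear_bit(0b1011, 3) == 0b0011  # 11 -> 3:  bit 3 turned off
    assert flip_bit(0b0011, 0) == 0b0010   # 3  -> 2:  bit 0 inverted
    assert is_bit_set(0b0010, 1) is True   # bit 1 of 2 is set
    assert get_bit(0b0010, 0) == 0         # bit 0 of 2 is clear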
"""simple docstring""" class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , lowerCAmelCase__): # we need a list not a string, so do something to change the type __SCREAMING_SNAKE_CASE = arr.split(""",""") def snake_case_ ( self): __SCREAMING_SNAKE_CASE = [int(self.array[0])] * len(self.array) __SCREAMING_SNAKE_CASE = [int(self.array[0])] * len(self.array) for i in range(1 , len(self.array)): __SCREAMING_SNAKE_CASE = max( int(self.array[i]) + sum_value[i - 1] , int(self.array[i])) __SCREAMING_SNAKE_CASE = max(sum_value[i] , rear[i - 1]) return rear[len(self.array) - 1] if __name__ == "__main__": __magic_name__ = input("please input some numbers:") __magic_name__ = SubArray(whole_array) __magic_name__ = array.solve_sub_array() print(("the results is:", re))
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
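# Shape illustration (added, hedged): the cache checks above build position ids by
# broadcasting a 0..L-2 range over the batch, then feed the final token with an
# explicit position of L-1. A minimal sketch with batch=2 and L=4:
if __name__ == "__main__":
    import jax.numpy as jnp

    position_ids = jnp.broadcast_to(jnp.arange(3)[None, :], (2, 3))  # [[0, 1, 2], [0, 1, 2]]
    last_position = jnp.array(2 * [[3]], dtype="i4")                 # shape (2, 1): final position
    assert position_ids.shape == (2, 3) and last_position.shape == (2, 1)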
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowercase : def __init__( self ,A__ ,A__=1_3 ,A__=3_0 ,A__=2 ,A__=3 ,A__=True ,A__=True ,A__=3_2 ,A__=2 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=1_0 ,A__=0.02 ,A__=3 ,A__=0.6 ,A__=None ,): lowercase = parent lowercase = batch_size lowercase = image_size lowercase = patch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = type_sequence_label_size lowercase = initializer_range lowercase = mask_ratio lowercase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase = (image_size // patch_size) ** 2 lowercase = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def A__ ( self): lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size) lowercase = self.get_config() return config, pixel_values, labels def A__ ( self): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A__ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def A__ ( self ,A__ ,A__ ,A__): lowercase = TFViTMAEModel(config=A__) lowercase = model(A__ ,training=A__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def A__ ( self ,A__ ,A__ ,A__): lowercase = TFViTMAEForPreTraining(A__) lowercase = model(A__ ,training=A__) # expected sequence length = num_patches lowercase = (self.image_size // self.patch_size) ** 2 lowercase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels)) # test greyscale images lowercase = 1 lowercase = TFViTMAEForPreTraining(A__) lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) lowercase = model(A__ ,training=A__) lowercase = self.patch_size**2 self.parent.assertEqual(result.logits.shape 
,(self.batch_size, num_patches, expected_num_channels)) def A__ ( self): lowercase = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase)) = config_and_inputs lowercase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : Any =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase_ : str ={'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} lowercase_ : Tuple =False lowercase_ : Optional[int] =False lowercase_ : Dict =False lowercase_ : Tuple =False def A__ ( self): lowercase = TFViTMAEModelTester(self) lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__ ,hidden_size=3_7) def A__ ( self): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''') def A__ ( self): pass def A__ ( self): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(A__) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer)) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ ,tf.keras.layers.Layer)) def A__ ( self): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,A__) def A__ ( self): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__) def A__ ( self): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A__) def A__ ( self): # make the mask reproducible np.random.seed(2) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = int((config.image_size // config.patch_size) ** 2) lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = self._prepare_for_class(A__ ,A__) lowercase = model(A__ ,noise=A__) lowercase = copy.deepcopy(self._prepare_for_class(A__ ,A__)) lowercase = model(**A__ ,noise=A__) lowercase = outputs_dict[0].numpy() lowercase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)) ,1E-6) def A__ ( self): # make the mask reproducible np.random.seed(2) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = int((config.image_size // config.patch_size) ** 2) lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) def prepare_numpy_arrays(A__): lowercase = {} for k, v in inputs_dict.items(): if tf.is_tensor(A__): lowercase = v.numpy() else: lowercase = np.array(A__) return inputs_np_dict for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = self._prepare_for_class(A__ ,A__) lowercase = prepare_numpy_arrays(A__) lowercase = model(A__ ,noise=A__) lowercase = model(**A__ ,noise=A__) self.assert_outputs_same(A__ ,A__) def A__ ( self ,A__ ,A__ ,A__): # make masks reproducible np.random.seed(2) lowercase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2) lowercase = 
np.random.uniform(size=(self.model_tester.batch_size, num_patches)) lowercase = tf.constant(A__) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase = tf_noise super().check_pt_tf_models(A__ ,A__ ,A__) def A__ ( self): # make mask reproducible np.random.seed(2) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(A__) if module_member_name.endswith('''MainLayer''') # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''')] == model_class.__name__[: -len('''Model''')] for module_member in (getattr(A__ ,A__),) if isinstance(A__ ,A__) and tf.keras.layers.Layer in module_member.__bases__ and getattr(A__ ,'''_keras_serializable''' ,A__) } lowercase = int((config.image_size // config.patch_size) ** 2) lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) lowercase = tf.convert_to_tensor(A__) inputs_dict.update({'''noise''': noise}) for main_layer_class in tf_main_layer_classes: lowercase = main_layer_class(A__) lowercase = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype) for name, tensor in inputs_dict.items() } lowercase = tf.keras.Model(A__ ,outputs=main_layer(A__)) lowercase = model(A__) with tempfile.TemporaryDirectory() as tmpdirname: lowercase = os.path.join(A__ ,'''keras_model.h5''') model.save(A__) lowercase = tf.keras.models.load_model( A__ ,custom_objects={main_layer_class.__name__: main_layer_class}) assert isinstance(A__ ,tf.keras.Model) lowercase = model(A__) self.assert_outputs_same(A__ ,A__) @slow def A__ ( self): # make mask reproducible np.random.seed(2) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = int((config.image_size // config.patch_size) ** 2) lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = self._prepare_for_class(A__ ,A__) lowercase = model(A__ ,noise=A__) if model_class.__name__ == "TFViTMAEModel": lowercase = outputs.last_hidden_state.numpy() lowercase = 0 else: lowercase = outputs.logits.numpy() lowercase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A__ ,saved_model=A__) lowercase = model_class.from_pretrained(A__) lowercase = model(A__ ,noise=A__) if model_class.__name__ == "TFViTMAEModel": lowercase = after_outputs['''last_hidden_state'''].numpy() lowercase = 0 else: lowercase = after_outputs['''logits'''].numpy() lowercase = 0 lowercase = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(A__ ,1E-5) def A__ ( self): # make mask reproducible np.random.seed(2) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = int((config.image_size // config.patch_size) ** 2) lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: lowercase = model_class(A__) lowercase = self._prepare_for_class(A__ ,A__) lowercase = model(A__ ,noise=A__) lowercase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(A__) lowercase = model_class.from_config(model.get_config()) # make sure it also accepts a normal config lowercase = 
model_class.from_config(model.config) lowercase = new_model(A__) # Build model new_model.set_weights(model.get_weights()) lowercase = new_model(A__ ,noise=A__) self.assert_outputs_same(A__ ,A__) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''') def A__ ( self): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''') def A__ ( self): pass @slow def A__ ( self): lowercase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''') self.assertIsNotNone(A__) def UpperCamelCase ( ): '''simple docstring''' lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowercase ( unittest.TestCase ): @cached_property def A__ ( self): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None @slow def A__ ( self): # make random mask reproducible across the PT and TF model np.random.seed(2) lowercase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''') lowercase = self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=A__ ,return_tensors='''tf''') # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase = ViTMAEConfig() lowercase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) lowercase = np.random.uniform(size=(1, num_patches)) # forward pass lowercase = model(**A__ ,noise=A__) # verify the logits lowercase = tf.convert_to_tensor([1, 1_9_6, 7_6_8]) self.assertEqual(outputs.logits.shape ,A__) lowercase = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,A__ ,atol=1E-4)
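# Worked example (values taken from the tester defaults above): ViTMAE keeps only
# ceil((1 - mask_ratio) * (num_patches + 1)) tokens, the +1 being the [CLS] token.
if __name__ == "__main__":
    import math

    image_size, patch_size, mask_ratio = 30, 2, 0.6
    num_patches = (image_size // patch_size) ** 2                      # 15**2 = 225
    seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
    assert (num_patches, seq_length) == (225, 91)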
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim SCREAMING_SNAKE_CASE : Tuple = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : str = ( nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : Any = GPTaConfig( vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ): '''simple 
docstring''' return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.encode_prefix(lowerCamelCase_ ) @torch.no_grad() def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 ) SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : Tuple = [] for feature in features: SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature # Only support beam search for now SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam( input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = eos_token_id SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool ) if input_embeds is not None: SCREAMING_SNAKE_CASE : Dict = input_embeds else: SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ ) for i in range(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = outputs.logits SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log() if scores is None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: SCREAMING_SNAKE_CASE : List[Any] = next_tokens else: SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] ) SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 ) else: SCREAMING_SNAKE_CASE : Tuple = -float(np.inf ) SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits seq_lengths[~is_stopped] += 1 SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1] SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source] SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1] SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 ) SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source] SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) 
, dim=1 ) SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source] SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source] SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 ) SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze() if is_stopped.all(): break SCREAMING_SNAKE_CASE : int = scores / seq_lengths SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ ) # tokens tensors are already padded to max_seq_length SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order] SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
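# Shape sketch (added; an assumption about intent, not code from the decoder above):
# during training the decoder prepends `prefix_length` zero labels so the
# language-modeling loss skips the prefix positions. With batch=2, prefix_length=10:
if __name__ == "__main__":
    import torch

    dummy_labels = torch.zeros(2, 10, dtype=torch.int64)  # what the dummy-token helper returns
    labels = torch.cat((dummy_labels, torch.ones(2, 5, dtype=torch.int64)), dim=1)
    assert labels.shape == (2, 15)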
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='unispeech' def __init__(self , a_=32 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=1_28 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=3_20 , a_=2 , a_=0.1 , a_=1_00 , a_=2_56 , a_=2_56 , a_=0.1 , a_="mean" , a_=False , a_=False , a_=2_56 , a_=80 , a_=0 , a_=1 , a_=2 , a_=0.5 , **a_ , ): '''simple docstring''' super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ ) __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = feat_extract_norm __snake_case : Optional[Any] = feat_extract_activation __snake_case : str = list(a_ ) __snake_case : Dict = list(a_ ) __snake_case : Any = list(a_ ) __snake_case : Tuple = conv_bias __snake_case : str = num_conv_pos_embeddings __snake_case : List[Any] = num_conv_pos_embedding_groups __snake_case : Tuple = len(self.conv_dim ) __snake_case : Any = num_hidden_layers __snake_case : Dict = intermediate_size __snake_case : Optional[Any] = hidden_act __snake_case : Optional[Any] = num_attention_heads __snake_case : Tuple = hidden_dropout __snake_case : Union[str, Any] = attention_dropout __snake_case : List[str] = activation_dropout __snake_case : str = feat_proj_dropout __snake_case : Optional[Any] = final_dropout __snake_case : Dict = layerdrop __snake_case : str = layer_norm_eps __snake_case : Optional[Any] = initializer_range __snake_case : Optional[int] = num_ctc_classes __snake_case : int = vocab_size __snake_case : Tuple = do_stable_layer_norm __snake_case : str = use_weighted_layer_sum __snake_case : str = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case : Dict = apply_spec_augment __snake_case : Optional[int] = mask_time_prob __snake_case : Tuple = mask_time_length __snake_case : Any = mask_time_min_masks __snake_case : int = mask_feature_prob __snake_case : Optional[Any] = mask_feature_length __snake_case : Union[str, Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __snake_case : Dict = num_codevectors_per_group __snake_case : int = num_codevector_groups __snake_case : Optional[Any] = contrastive_logits_temperature __snake_case : str = feat_quantizer_dropout __snake_case : Union[str, Any] = num_negatives __snake_case : int = codevector_dim __snake_case : Any = proj_codevector_dim __snake_case : Optional[int] = diversity_loss_weight # ctc loss __snake_case : Any = ctc_loss_reduction __snake_case : Optional[Any] = ctc_zero_infinity # pretraining loss __snake_case : List[str] = replace_prob @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
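# Worked example: `inputs_to_logits_ratio` is the product of the conv strides,
# i.e. how many raw audio samples map onto one logits frame. With the defaults:
if __name__ == "__main__":
    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320  # 5 * 2**6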
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
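# Usage sketch: GitConfig fills in a default GitVisionConfig when `vision_config`
# is None (see the logger.info above), and to_dict() re-serializes the nested config.
if __name__ == "__main__":
    config = GitConfig()
    serialized = config.to_dict()
    assert serialized["model_type"] == "git" and "vision_config" in serialized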
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __snake_case ( UpperCamelCase_ ,unittest.TestCase ): _a = BertTokenizer _a = BertTokenizerFast _a = True _a = True _a = filter_non_english def UpperCAmelCase__ ( self : Optional[Any]): super().setUp() lowerCAmelCase_ : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def UpperCAmelCase__ ( self : int , A_ : Any): lowerCAmelCase_ : str = '''UNwant\u00E9d,running''' lowerCAmelCase_ : Union[str, Any] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : Optional[int] = self.tokenizer_class(self.vocab_file) lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(A_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_) , [9, 6, 7, 1_2, 1_0, 1_1]) def UpperCAmelCase__ ( self : int): if not self.test_rust_tokenizer: return lowerCAmelCase_ : Tuple = self.get_tokenizer() lowerCAmelCase_ : str = self.get_rust_tokenizer() lowerCAmelCase_ : Optional[Any] = '''UNwant\u00E9d,running''' lowerCAmelCase_ : Any = tokenizer.tokenize(A_) lowerCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : str = tokenizer.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : int = rust_tokenizer.encode(A_ , add_special_tokens=A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer() lowerCAmelCase_ : Optional[int] = tokenizer.encode(A_) lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(A_) self.assertListEqual(A_ , A_) # With lower casing lowerCAmelCase_ : Optional[Any] = self.get_tokenizer(do_lower_case=A_) lowerCAmelCase_ : Tuple = self.get_rust_tokenizer(do_lower_case=A_) lowerCAmelCase_ : Dict = '''UNwant\u00E9d,running''' lowerCAmelCase_ : Tuple = tokenizer.tokenize(A_) lowerCAmelCase_ : Any = rust_tokenizer.tokenize(A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : Any = tokenizer.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : List[str] = rust_tokenizer.encode(A_ , add_special_tokens=A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : List[str] = self.get_rust_tokenizer() lowerCAmelCase_ : int = tokenizer.encode(A_) lowerCAmelCase_ : Tuple = rust_tokenizer.encode(A_) self.assertListEqual(A_ , A_) def UpperCAmelCase__ ( self : List[Any]): lowerCAmelCase_ : List[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz''']) def UpperCAmelCase__ ( self : Optional[Any]): lowerCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=A_) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : str = BasicTokenizer(do_lower_case=A_ , strip_accents=A_) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo''']) def UpperCAmelCase__ ( self : Optional[int]): lowerCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=A_ , strip_accents=A_) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=A_) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : int = BasicTokenizer(do_lower_case=A_) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : Any = BasicTokenizer(do_lower_case=A_ , strip_accents=A_) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase__ ( self : Dict): lowerCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=A_ , strip_accents=A_) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=A_ , never_split=['''[UNK]''']) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]''']) def UpperCAmelCase__ ( self : Any): lowerCAmelCase_ : Dict = BasicTokenizer() lowerCAmelCase_ : str = '''a\n\'ll !!to?\'d of, can\'t.''' lowerCAmelCase_ : Dict = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.'''] self.assertListEqual(tokenizer.tokenize(A_) , A_) def UpperCAmelCase__ ( self : str): lowerCAmelCase_ : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] lowerCAmelCase_ : Optional[Any] = {} for i, token in enumerate(A_): lowerCAmelCase_ : Union[str, Any] = i lowerCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=A_ , unk_token='''[UNK]''') self.assertListEqual(tokenizer.tokenize('''''') , []) self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing''']) def UpperCAmelCase__ ( self : str): self.assertTrue(_is_whitespace(''' ''')) self.assertTrue(_is_whitespace('''\t''')) self.assertTrue(_is_whitespace('''\r''')) self.assertTrue(_is_whitespace('''\n''')) self.assertTrue(_is_whitespace('''\u00A0''')) self.assertFalse(_is_whitespace('''A''')) self.assertFalse(_is_whitespace('''-''')) def UpperCAmelCase__ ( self : Tuple): self.assertTrue(_is_control('''\u0005''')) self.assertFalse(_is_control('''A''')) self.assertFalse(_is_control(''' ''')) self.assertFalse(_is_control('''\t''')) self.assertFalse(_is_control('''\r''')) def UpperCAmelCase__ ( self : List[str]): self.assertTrue(_is_punctuation('''-''')) self.assertTrue(_is_punctuation('''$''')) self.assertTrue(_is_punctuation('''`''')) self.assertTrue(_is_punctuation('''.''')) self.assertFalse(_is_punctuation('''A''')) self.assertFalse(_is_punctuation(''' ''')) def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ : Optional[int] = self.get_tokenizer() lowerCAmelCase_ : str = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A_) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']]) self.assertListEqual( [rust_tokenizer.tokenize(A_) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']]) @slow def UpperCAmelCase__ ( self : Optional[Any]): lowerCAmelCase_ : str = self.tokenizer_class.from_pretrained('''bert-base-uncased''') lowerCAmelCase_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=A_) lowerCAmelCase_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A_) lowerCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(A_) lowerCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(A_ , A_) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def UpperCAmelCase__ ( self : Any): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): lowerCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_) lowerCAmelCase_ : Union[str, Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus( A_ , return_attention_mask=A_ , return_token_type_ids=A_ , 
return_offsets_mapping=A_ , add_special_tokens=A_ , ) lowerCAmelCase_ : Tuple = tokenizer_r.do_lower_case if hasattr(A_ , '''do_lower_case''') else False lowerCAmelCase_ : List[str] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''])) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping''']) def UpperCAmelCase__ ( self : Union[str, Any]): lowerCAmelCase_ : Dict = ['''的''', '''人''', '''有'''] lowerCAmelCase_ : str = ''''''.join(A_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(A_ , **A_) lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(A_ , **A_) lowerCAmelCase_ : Dict = tokenizer_p.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : Union[str, Any] = tokenizer_r.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(A_) lowerCAmelCase_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(A_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A_ , A_) self.assertListEqual(A_ , A_) lowerCAmelCase_ : int = False lowerCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(A_ , **A_) lowerCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(A_ , **A_) lowerCAmelCase_ : Tuple = tokenizer_r.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : List[Any] = tokenizer_p.encode(A_ , add_special_tokens=A_) lowerCAmelCase_ : Dict = tokenizer_r.convert_ids_to_tokens(A_) lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(A_) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ : int = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_) ] self.assertListEqual(A_ , A_) self.assertListEqual(A_ , A_)
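# Standalone illustration of the wordpiece behaviour asserted in the tests above,
# using the same toy vocabulary: greedy longest-match-first splits "unwanted" into
# un / ##want / ##ed, and a word containing an out-of-vocabulary piece becomes [UNK].
if __name__ == "__main__":
    toy_vocab = {tok: i for i, tok in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
    assert wordpiece.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]
    assert wordpiece.tokenize("unwantedX running") == ["[UNK]", "runn", "##ing"]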
'''simple docstring''' from manim import * class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 ) SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i, rect in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 ) target.move_to(lowerCamelCase_ ) model_arr.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCamelCase_ ) self.add(*lowerCamelCase_ , *lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) disk.move_to([-4, -1.25, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = 
MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 ) input.set_fill(lowerCamelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 ) self.play(Write(lowerCamelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 ) self.play(MoveToTarget(lowerCamelCase_ ) ) self.play(FadeOut(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02} self.play( Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) SCREAMING_SNAKE_CASE : Optional[int] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) SCREAMING_SNAKE_CASE : Any = AnimationGroup( FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCamelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: SCREAMING_SNAKE_CASE : Optional[Any] = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) SCREAMING_SNAKE_CASE : 
Union[str, Any] = a_c SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , ) SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) ) self.wait()
"""Optimal merge pattern: find the minimum total cost of merging a set of
files two at a time, always merging the two smallest first."""


def optimal_merge_pattern(files: list) -> int:
    """Return the optimal (minimum) total cost of merging all `files`.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
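# An equivalent min-heap formulation (O(n log n) instead of repeated linear
# scans); a sketch that is not part of the original file. The greedy rule is
# the same one used in Huffman coding: always merge the two smallest files.
import heapq


def optimal_merge_cost_heap(sizes):
    heapq.heapify(sizes)
    total = 0
    while len(sizes) > 1:
        merged = heapq.heappop(sizes) + heapq.heappop(sizes)
        total += merged
        heapq.heappush(sizes, merged)
    return total


assert optimal_merge_cost_heap([2, 3, 4]) == 14  # matches the doctest above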
"""Breadth-first search shortest path on an unweighted, undirected graph."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Fill `self.parent` with the breadth-first tree rooted at the source."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to `target_vertex` as "G->C->...".

        Raises ValueError if the target was never reached by the search."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path to "Foo"
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata a : Union[str, Any] = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class __UpperCamelCase ( tr.AbstractTransform ): def __init__( self , lowerCAmelCase__ = " " ) -> str: a : Optional[Any] = sentence_delimiter def __a ( self , lowerCAmelCase__ ) -> Optional[int]: return list(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ ) -> Optional[int]: a : Dict = [] for sent_idx, sentence in enumerate(lowerCAmelCase__ ): chars.extend(self.process_string(lowerCAmelCase__ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1: chars.append(self.sentence_delimiter ) return chars a : Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: a : Any = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) a : Tuple = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' a : Any = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' a : Optional[Any] = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __a ( self ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates", ] , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]: if concatenate_texts: return jiwer.compute_measures( lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )["wer"] a : str = 0 a : int = 0 for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): a : Optional[int] = jiwer.compute_measures( lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
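# A tiny worked instance of the formula above, CER = (S + D + I) / N, with
# illustrative counts (not produced by jiwer): reference "abc" vs prediction
# "axcd" has one substitution (b -> x) and one insertion (d), over N = 3
# reference characters.
S, D, I, N = 1, 0, 1, 3
assert round((S + D + I) / N, 4) == 0.6667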
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = pos_x SCREAMING_SNAKE_CASE : Any = pos_y SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Tuple = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowerCamelCase_ ) self.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCamelCase_ ) else: self.open_nodes.append(lowerCamelCase_ ) return [self.start.pos] def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) ) return successors def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = node SCREAMING_SNAKE_CASE : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) self.fwd_astar.closed_nodes.append(lowerCamelCase_ ) self.bwd_astar.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node SCREAMING_SNAKE_CASE : Any = current_fwd_node SCREAMING_SNAKE_CASE : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop( astar.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCamelCase_ ) else: astar.open_nodes.append(lowerCamelCase_ ) return [self.fwd_astar.start.pos] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
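# The open lists above are fully re-sorted and popped from the front on every
# iteration; a binary heap gives the same "lowest f_cost first" behavior in
# O(log n) per push/pop. A standalone sketch only: `_Node` below is a minimal
# stand-in, not the Node class defined above.
import heapq
from dataclasses import dataclass, field


@dataclass(order=True)
class _Node:
    f_cost: int
    name: str = field(compare=False)


_open_heap = [_Node(7, "B"), _Node(3, "A"), _Node(5, "C")]
heapq.heapify(_open_heap)              # O(n) once, instead of list.sort() each step
print(heapq.heappop(_open_heap).name)  # prints "A": the lowest f_cost comes out first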
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "Speech2TextFeatureExtractor" lowercase__ = "Speech2TextTokenizer" def __init__( self : Dict ,lowercase_ : Optional[Any] ,lowercase_ : int ): super().__init__(lowercase_ ,lowercase_ ) lowerCAmelCase__ : Optional[int] = self.feature_extractor lowerCAmelCase__ : List[str] = False def __call__( self : Union[str, Any] ,*lowercase_ : Any ,**lowercase_ : Any ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowercase_ ,**lowercase_ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) lowerCAmelCase__ : Tuple = kwargs.pop('''raw_speech''' ) else: lowerCAmelCase__ : str = kwargs.pop('''audio''' ,lowercase_ ) lowerCAmelCase__ : Optional[Any] = kwargs.pop('''sampling_rate''' ,lowercase_ ) lowerCAmelCase__ : int = kwargs.pop('''text''' ,lowercase_ ) if len(lowercase_ ) > 0: lowerCAmelCase__ : int = args[0] lowerCAmelCase__ : Optional[int] = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: lowerCAmelCase__ : Optional[Any] = self.feature_extractor(lowercase_ ,*lowercase_ ,sampling_rate=lowercase_ ,**lowercase_ ) if text is not None: lowerCAmelCase__ : Optional[int] = self.tokenizer(lowercase_ ,**lowercase_ ) if text is None: return inputs elif audio is None: return encodings else: lowerCAmelCase__ : str = encodings['''input_ids'''] return inputs def __lowerCAmelCase ( self : List[Any] ,*lowercase_ : Dict ,**lowercase_ : Any ): return self.tokenizer.batch_decode(*lowercase_ ,**lowercase_ ) def __lowerCAmelCase ( self : Dict ,*lowercase_ : Optional[int] ,**lowercase_ : Union[str, Any] ): return self.tokenizer.decode(*lowercase_ ,**lowercase_ ) @contextmanager def __lowerCAmelCase ( self : Tuple ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) lowerCAmelCase__ : int = True lowerCAmelCase__ : List[str] = self.tokenizer yield lowerCAmelCase__ : Union[str, Any] = self.feature_extractor lowerCAmelCase__ : Dict = False
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''efficientnet''' def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : int = width_coefficient SCREAMING_SNAKE_CASE : List[str] = depth_coefficient SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor SCREAMING_SNAKE_CASE : List[str] = kernel_sizes SCREAMING_SNAKE_CASE : Dict = in_channels SCREAMING_SNAKE_CASE : List[str] = out_channels SCREAMING_SNAKE_CASE : Any = depthwise_padding SCREAMING_SNAKE_CASE : Dict = strides SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats SCREAMING_SNAKE_CASE : Any = expand_ratios SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dim SCREAMING_SNAKE_CASE : List[str] = pooling_type SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Any = batch_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum SCREAMING_SNAKE_CASE : Dict = dropout_rate SCREAMING_SNAKE_CASE : int = drop_connect_rate SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4 class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return 1e-5
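# A minimal sketch of instantiating the config defined above; the class is
# exposed in `transformers` as EfficientNetConfig (the names here are
# obfuscated by the dataset preprocessing).
from transformers import EfficientNetConfig

_config = EfficientNetConfig(image_size=600, width_coefficient=2.0, depth_coefficient=3.1)
print(_config.hidden_dim)         # 2560 (the default above)
print(_config.num_hidden_layers)  # sum(num_block_repeats) * 4 = 64 with the defaults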
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
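# Bracketing a workload with the helpers above (a sketch; it assumes a
# CUDA-capable PyTorch setup, matching the helpers' unconditional
# torch.cuda calls).
_start = start_measure()
_buffers = [bytearray(1024) for _ in range(100_000)]  # roughly 100 MiB of CPU allocations
log_measures(end_measure(_start), "toy workload")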
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } lowerCAmelCase__ = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } lowerCAmelCase__ = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } lowerCAmelCase__ = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase__ = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Tuple =VOCAB_FILES_NAMES a : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a : 
Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION a : Optional[int] =DPRContextEncoderTokenizer class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Any =VOCAB_FILES_NAMES a : int =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a : Tuple =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Union[str, Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a : Tuple =DPRQuestionEncoderTokenizer lowerCAmelCase__ = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase__ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase__ = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(lowercase ) class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ): """simple docstring""" if titles is None and texts is None: return super().__call__( snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , ) elif titles is None or texts is None: lowerCAmelCase : Dict = titles if texts is None else texts return super().__call__( snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , ) lowerCAmelCase : Any = titles if not isinstance(snake_case__ , snake_case__ ) else [titles] lowerCAmelCase : Optional[int] = texts if not isinstance(snake_case__ , snake_case__ ) else [texts] lowerCAmelCase : Optional[int] = len(snake_case__ ) lowerCAmelCase : Dict = questions if not isinstance(snake_case__ , snake_case__ ) else [questions] * n_passages assert len(snake_case__ ) == len( snake_case__ ), f"""There should be as many titles than texts but got {len(snake_case__ )} titles and {len(snake_case__ )} texts.""" lowerCAmelCase : Tuple = super().__call__(snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ )["input_ids"] lowerCAmelCase : Union[str, Any] = super().__call__(snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ )["input_ids"] lowerCAmelCase : Optional[Any] = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(snake_case__ , snake_case__ ) ] } if return_attention_mask is not False: lowerCAmelCase : Any = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCAmelCase : 
Optional[int] = attention_mask return self.pad(snake_case__ , padding=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = 16 , snake_case__ = 64 , snake_case__ = 4 , ): """simple docstring""" lowerCAmelCase : List[str] = reader_input["input_ids"] lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = reader_output[:3] lowerCAmelCase : int = len(snake_case__ ) lowerCAmelCase : List[str] = sorted(range(snake_case__ ) , reverse=snake_case__ , key=relevance_logits.__getitem__ ) lowerCAmelCase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowerCAmelCase : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCAmelCase : Optional[int] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCAmelCase : int = sequence_ids.index(self.pad_token_id ) else: lowerCAmelCase : List[str] = len(snake_case__ ) lowerCAmelCase : int = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case__ , top_spans=snake_case__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case__ , start_index=snake_case__ , end_index=snake_case__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(snake_case__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): """simple docstring""" lowerCAmelCase : str = [] for start_index, start_score in enumerate(snake_case__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCAmelCase : Any = sorted(snake_case__ , key=lambda snake_case__ : x[1] , reverse=snake_case__ ) lowerCAmelCase : int = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" lowerCAmelCase : Tuple = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(snake_case__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowercase ) class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ): """simple docstring""" a : int =VOCAB_FILES_NAMES a : Optional[int] =READER_PRETRAINED_VOCAB_FILES_MAP a : int =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : List[Any] =READER_PRETRAINED_INIT_CONFIGURATION a : int =["input_ids", "attention_mask"] a : Optional[int] =DPRReaderTokenizer
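# A sketch of the question/title/text call documented above, using the fast
# reader tokenizer (assumes the public checkpoint and a network connection;
# the strings are illustrative).
from transformers import DPRReaderTokenizerFast

_tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
_encoded = _tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by Haddaway."],
    return_tensors="pt",
)
# each row of _encoded["input_ids"] is laid out as: [CLS] question [SEP] title [SEP] text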
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
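# The custom-timestep behavior exercised by the tests above, outside the test
# harness (a sketch; the constructor values mirror the test config).
from diffusers import CMStochasticIterativeScheduler

_scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
_scheduler.set_timesteps(timesteps=[106, 0])  # an explicit, descending schedule
print(_scheduler.timesteps)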
"""simple docstring""" import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 13 , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE=[16, 32, 64, 128] , _SCREAMING_SNAKE_CASE = 7 , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 37 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = [2, 2, 2, 2] , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Any = image_size UpperCAmelCase : Tuple = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Dict = hidden_act UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Union[str, Any] = encoder_stride UpperCAmelCase : Dict = num_attention_outputs UpperCAmelCase : List[str] = embed_dim UpperCAmelCase : int = embed_dim + 1 UpperCAmelCase : Union[str, Any] = resolution UpperCAmelCase : Any = depths UpperCAmelCase : int = hidden_sizes UpperCAmelCase : List[str] = dim UpperCAmelCase : Optional[Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = TFEfficientFormerModel(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = self.type_sequence_label_size UpperCAmelCase : Optional[Any] = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase : List[str] = 1 UpperCAmelCase : Optional[Any] = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[int] = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __lowerCAmelCase : Optional[int] = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __lowerCAmelCase : Any = False __lowerCAmelCase : str = False __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : Dict = False __lowerCAmelCase : Tuple = False def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] = TFEfficientFormerModelTester(self ) UpperCAmelCase : List[str] = ConfigTester( self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase , 
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Any = [*signature.parameters.keys()] UpperCAmelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , training=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : Optional[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) if hasattr(self.model_tester , """encoder_seq_length""" ): UpperCAmelCase : str = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: UpperCAmelCase : List[Any] = seq_length * self.model_tester.chunk_length else: UpperCAmelCase : str = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: UpperCAmelCase : Dict = outputs.decoder_hidden_states self.asseretIsInstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = getattr(self.model_tester , """seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """decoder_seq_length""" , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Optional[int] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' UpperCAmelCase : Dict = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = TFEfficientFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Tuple = getattr(self.model_tester , """seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = getattr(self.model_tester , """encoder_seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = getattr(self.model_tester , """key_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = getattr(self.model_tester , """chunk_length""" , _SCREAMING_SNAKE_CASE ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): UpperCAmelCase : Dict = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = True UpperCAmelCase : str = False UpperCAmelCase : List[Any] = True UpperCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , training=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , training=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model UpperCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes UpperCAmelCase : Optional[int] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_SCREAMING_SNAKE_CASE ) for key, val in model.input_signature.items() if key in model.dummy_inputs } UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.assertTrue(outputs_dict is not None ) def _snake_case ( ): 
UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Any = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : str = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) # forward pass UpperCAmelCase : List[Any] = model(**_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase : str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : str = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) UpperCAmelCase : str = self.default_image_processor UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) # forward pass UpperCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase : Dict = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
109
'''simple docstring''' from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ): '''simple docstring''' super().__init__( lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE : Optional[int] = Text( cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = None self.builder.download_and_prepare( download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE : int = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory ) return dataset
323
0
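The EfficientFormer test sample above repeatedly asserts that a model emits one hidden-state tensor per layer plus the embedding output, each with shape (batch, seq_length, hidden_size). A minimal self-contained sketch of that check, using a toy Keras stack rather than EfficientFormer (ToyEncoder and every shape below are illustrative stand-ins, not names from the sample):

import tensorflow as tf

class ToyEncoder(tf.keras.Model):
    # Hypothetical stand-in: dense "blocks" whose intermediate activations
    # play the role of hidden_states in the real model tester.
    def __init__(self, hidden_size=8, num_layers=3):
        super().__init__()
        self.blocks = [tf.keras.layers.Dense(hidden_size) for _ in range(num_layers)]

    def call(self, x):
        hidden_states = [x]  # embedding output comes first, as in the HF convention
        for block in self.blocks:
            x = block(x)
            hidden_states.append(x)
        return x, hidden_states

model = ToyEncoder()
pixel_values = tf.random.uniform((2, 4, 8))  # (batch, seq_length, hidden_size)
_, hidden_states = model(pixel_values)
assert len(hidden_states) == len(model.blocks) + 1  # one per layer + embeddings
for h in hidden_states:
    assert h.shape == (2, 4, 8)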
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
196
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
0
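The UperNet conversion script above builds a list of (old, new) key pairs and rewrites the checkpoint with dict.pop. A self-contained sketch of that rename pattern on a fake state dict (the tensors and the two key pairs below are illustrative; the real script generates hundreds of pairs):

import torch

state_dict = {
    "backbone.patch_embed.projection.weight": torch.zeros(3, 3),
    "decode_head.conv_seg.bias": torch.zeros(4),
}
rename_keys = [
    ("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]
for src, dest in rename_keys:
    # pop the tensor under the old key, reinsert it under the new one
    state_dict[dest] = state_dict.pop(src)

print(sorted(state_dict))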
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets a_ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' a_ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n' a_ = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def __magic_name__ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def __magic_name__ ( self : Dict , __lowercase : Dict , __lowercase : Optional[int] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =0.0 for i, j in zip(lowerCamelCase_ , lowerCamelCase_ ): n_correct += 1.0 if math_equivalence.is_equiv(lowerCamelCase_ , lowerCamelCase_ ) else 0.0 SCREAMING_SNAKE_CASE__ : str =n_correct / len(lowerCamelCase_ ) return { "accuracy": accuracy, }
152
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Any = pad_token_id SCREAMING_SNAKE_CASE : List[Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = vocab SCREAMING_SNAKE_CASE : List[Any] = merges SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ): '''simple docstring''' return cls(**lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs( lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
323
0
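The competition_math metric above is canonicalize-then-count: one point per prediction judged equivalent to its reference, divided by the total. A sketch with a stand-in equivalence test (plain string comparison here; the real metric calls math_equivalence.is_equiv, which canonicalizes LaTeX first):

def is_equiv(pred: str, ref: str) -> bool:
    # placeholder for the LaTeX-aware check in the hendrycks/math package
    return pred.strip() == ref.strip()

predictions = ["1/2", "3"]
references = ["1/2", "4"]
n_correct = sum(1.0 for p, r in zip(predictions, references) if is_equiv(p, r))
print({"accuracy": n_correct / len(predictions)})  # {'accuracy': 0.5}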
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :int ) -> Dict: __SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" __SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : str = """en_speaker_1""" __SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" __SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" __SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def __magic_name__( self :int , **lowerCAmelCase__ :int ) -> int: return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __magic_name__( self :Union[str, Any] ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __magic_name__( self :List[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __magic_name__( self :List[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __magic_name__( self :Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __SCREAMING_SNAKE_CASE : List[str] = 35 __SCREAMING_SNAKE_CASE : List[Any] = 2 __SCREAMING_SNAKE_CASE : int = 8 __SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset __SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) __SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file __SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) __SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) __SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub __SCREAMING_SNAKE_CASE : 
Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def __magic_name__( self :str ) -> str: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
9
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train""" SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Union[str, Any] = streaming SCREAMING_SNAKE_CASE : Optional[int] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : int = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Tuple = streaming SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Dict ): '''simple docstring''' pass
323
0
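The Bark processor test above saves a voice preset as an .npz archive and reloads it through the processor. The underlying numpy round trip, stripped of the processor, looks like this (array shapes copied from the test; the temp path is arbitrary):

import os
import tempfile

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)  # one named array per key

loaded = np.load(path)
for key in voice_preset:
    assert np.array_equal(voice_preset[key], loaded[key])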
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
257
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
0
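The utils module above pairs an __all__ list with matching imports, which is what restricts `from module import *` to the listed names. A self-contained demo that writes a throwaway module to disk and star-imports it (the module name and its contents are invented for illustration):

import importlib.util
import os
import sys
import tempfile

src = "__all__ = ['public']\npublic = 1\n_private = 2\nother = 3\n"
path = os.path.join(tempfile.mkdtemp(), "demo_mod.py")
with open(path, "w") as f:
    f.write(src)

spec = importlib.util.spec_from_file_location("demo_mod", path)
module = importlib.util.module_from_spec(spec)
sys.modules["demo_mod"] = module
spec.loader.exec_module(module)

ns = {}
exec("from demo_mod import *", ns)
print("public" in ns, "_private" in ns, "other" in ns)  # True False False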
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of
    the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
331
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
323
0
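The sum-of-squares sample above brute-forces Project Euler problem 6; the same number falls out of the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, which makes a handy cross-check:

def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

print(solution_closed_form())  # 25164150, matching the brute-force version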
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self :int , __magic_name__ :Any , __magic_name__ :List[Any]=13 , __magic_name__ :List[str]=7 , __magic_name__ :Any=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Dict=True , __magic_name__ :Any=True , __magic_name__ :List[Any]=99 , __magic_name__ :Dict=16 , __magic_name__ :Dict=36 , __magic_name__ :List[str]=6 , __magic_name__ :List[str]=6 , __magic_name__ :str=6 , __magic_name__ :Union[str, Any]=37 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :List[Any]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[Any]=512 , __magic_name__ :List[Any]=16 , __magic_name__ :Any=2 , __magic_name__ :Dict=0.02 , __magic_name__ :Any=3 , __magic_name__ :int=4 , __magic_name__ :List[Any]=None , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = embedding_size a = hidden_size a = num_hidden_layers a = num_hidden_groups a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def lowerCamelCase__ ( self :int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Optional[int] , __magic_name__ :Optional[int] , __magic_name__ :Any , __magic_name__ :Dict , __magic_name__ :Optional[Any] , __magic_name__ :Tuple ): '''simple 
docstring''' a = AlbertModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) a = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) a = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :str , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :Dict ): '''simple docstring''' a = AlbertForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , sentence_order_label=lowerCamelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase__ ( self :Any , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :Optional[int] , __magic_name__ :Dict , __magic_name__ :List[Any] ): '''simple docstring''' a = AlbertForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Any , __magic_name__ :Union[str, Any] , __magic_name__ :Dict , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Dict ): '''simple docstring''' a = AlbertForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Any , __magic_name__ :Any , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :str , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :List[Any] ): '''simple docstring''' a = self.num_labels a = AlbertForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Dict , __magic_name__ :Optional[int] ): '''simple docstring''' a = self.num_labels a = AlbertForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 
self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Dict ): '''simple docstring''' a = self.num_choices a = AlbertForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.prepare_config_and_inputs() ( a ) = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = True def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Dict=False ): '''simple docstring''' a = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) return inputs_dict def lowerCamelCase__ ( self :int ): '''simple docstring''' a = AlbertModelTester(self ) a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) def lowerCamelCase__ ( self 
:Optional[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) @slow def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = AlbertModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = AlbertModel.from_pretrained("""albert-base-v2""" ) a = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0] a = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowerCamelCase_ ) a = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
228
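The Albert tester above leans on two helpers, ids_tensor and random_attention_mask, imported from the shared test utilities. A toy reimplementation showing what they produce (this is a sketch of their behaviour, not the exact upstream code):

import torch

def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size)
    return torch.randint(low=0, high=vocab_size, size=shape, dtype=torch.long)

def random_attention_mask(shape):
    mask = torch.randint(low=0, high=2, size=shape, dtype=torch.long)
    mask[:, -1] = 1  # make sure no row is fully masked
    return mask

input_ids = ids_tensor((13, 7), vocab_size=99)  # batch_size=13, seq_length=7 as in the tester
attention_mask = random_attention_mask((13, 7))
assert input_ids.shape == attention_mask.shape == (13, 7)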
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
323
0
"""simple docstring""" import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class __lowercase ( unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = jnp.ones((batch_size, length) ) / length return scores def _lowerCamelCase ( self ): __a : Any = None __a : Optional[int] = 20 __a : Tuple = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase_ ) # tweak scores to not be uniform anymore __a : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch __a : Any = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax __a : Optional[int] = jax.nn.softmax(lowerCamelCase_ , axis=-1 ) __a : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) __a : int = FlaxTemperatureLogitsWarper(temperature=1.3 ) __a : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) __a : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def _lowerCamelCase ( self ): __a : Optional[int] = None __a : str = 10 __a : List[str] = 2 # create ramp distribution __a : List[Any] = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() __a : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size __a : List[Any] = FlaxTopKLogitsWarper(3 ) __a : List[Any] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case __a : Any = 5 __a : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) __a : int = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy() __a : int = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def _lowerCamelCase ( self ): __a : List[str] = None __a : int = 10 __a : Optional[int] = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) __a : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) ) __a : str = FlaxTopPLogitsWarper(0.8 ) __a : str = 
np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 __a : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # check edge cases with negative and extreme logits __a : Any = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme __a : Tuple = ramp_logits[1] * 1_0_0.0 # make sure at least 2 tokens are kept __a : Optional[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) __a : Union[str, Any] = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def _lowerCamelCase ( self ): __a : List[Any] = 20 __a : Any = 4 __a : Dict = 0 __a : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) # check that min length is applied at length 5 __a : Dict = ids_tensor((batch_size, 20) , vocab_size=20 ) __a : Dict = 5 __a : List[str] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : Optional[int] = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 __a : Union[str, Any] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : Optional[Any] = 15 __a : Optional[Any] = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def _lowerCamelCase ( self ): __a : Tuple = 20 __a : Tuple = 4 __a : Union[str, Any] = 0 __a : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the bos_token_id score __a : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 ) __a : Tuple = 1 __a : Any = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : str = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 __a : Optional[Any] = 3 __a : int = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : Tuple = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def _lowerCamelCase ( self ): __a : Tuple = 20 __a : int = 4 __a : Optional[int] = 0 __a : Tuple = 5 __a : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the eos_token_id when max_length is reached __a : str = ids_tensor((batch_size, 4) , vocab_size=20 ) __a : int = 4 __a : Optional[int] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : int = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # 
score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached __a : List[Any] = 3 __a : Any = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : Dict = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def _lowerCamelCase ( self ): __a : List[Any] = 4 __a : Dict = 10 __a : Dict = 15 __a : Union[str, Any] = 2 __a : int = 1 __a : List[str] = 15 # dummy input_ids and scores __a : Any = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) __a : List[str] = input_ids.copy() __a : Tuple = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : Union[str, Any] = scores.copy() # instantiate all dist processors __a : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) __a : Tuple = FlaxTopKLogitsWarper(3 ) __a : str = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __a : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) __a : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) __a : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) __a : str = 10 # no processor list __a : Union[str, Any] = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : Optional[int] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : List[Any] = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : Any = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : List[str] = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : str = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # with processor list __a : Optional[Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) __a : int = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def _lowerCamelCase ( self ): __a : Tuple = 4 __a : Tuple = 10 __a : Optional[int] = 15 __a : Union[str, Any] = 2 __a : Optional[int] = 1 __a : Union[str, Any] = 15 # dummy input_ids and scores __a : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) __a : List[Any] = input_ids.copy() __a : Optional[Any] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) __a : int = scores.copy() # instantiate all dist processors __a : int = FlaxTemperatureLogitsWarper(temperature=0.5 ) __a : Optional[int] = FlaxTopKLogitsWarper(3 ) __a : int = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __a : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) __a : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) __a : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) __a : Optional[Any] = 10 # no processor list def run_no_processor_list(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : str = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : Union[str, Any] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : int = top_p_warp(lowerCamelCase_ , 
lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : Union[str, Any] = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : Optional[Any] = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) __a : int = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores # with processor list def run_processor_list(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Any = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) __a : List[str] = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores __a : Optional[Any] = jax.jit(lowerCamelCase_ ) __a : List[Any] = jax.jit(lowerCamelCase_ ) __a : str = jitted_run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) __a : Dict = jitted_run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
160
'''simple docstring'''
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
323
0
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging

logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """simple docstring"""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    """simple docstring"""

    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    """simple docstring"""

    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    """simple docstring"""

    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    """simple docstring"""

    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """simple docstring"""
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
39
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        '''simple docstring'''
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        '''simple docstring'''
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        '''simple docstring'''
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
323
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
153
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
323
0
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # refill the output stack only when it is empty; moving the items
        # from the input stack reverses their order, restoring FIFO
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
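A quick usage sketch for the two-stack queue above (the values are arbitrary). Each element is pushed once and popped at most twice, so `get` runs in amortized O(1) even though a single call may move the entire input stack.

q = QueueByTwoStacks([1, 2, 3])
q.put(4)
assert q.get() == 1  # FIFO order despite the LIFO stacks
assert q.get() == 2
q.put(5)
assert [q.get() for _ in range(3)] == [3, 4, 5]
assert len(q) == 0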
322
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
323
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin UpperCAmelCase__ = False @skip_mps class lowerCamelCase__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase): SCREAMING_SNAKE_CASE__ = StableDiffusionAttendAndExcitePipeline SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __A (cls ) -> Optional[Any]: super().setUpClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) @classmethod def __A (cls ) -> str: super().tearDownClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) def __A (self ) -> Optional[int]: torch.manual_seed(0 ) _lowercase =UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , ) _lowercase =DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) _lowercase =AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _lowercase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _lowercase =CLIPTextModel(lowerCamelCase_ ) _lowercase =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _lowercase ={ """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __A (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple: if str(lowerCamelCase_ ).startswith('''mps''' ): _lowercase =torch.manual_seed(lowerCamelCase_ ) else: _lowercase =torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) _lowercase ={ """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def __A (self ) -> Tuple: _lowercase ="""cpu""" _lowercase =self.get_dummy_components() _lowercase =self.pipeline_class(**lowerCamelCase_ ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowercase 
=self.get_dummy_inputs(lowerCamelCase_ ) _lowercase =pipe(**lowerCamelCase_ ).images _lowercase =image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _lowercase =np.array( [0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] ) _lowercase =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase_ , 1e-3 ) def __A (self ) -> List[str]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def __A (self ) -> Union[str, Any]: self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A (self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def __A (self ) -> Union[str, Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def __A (self ) -> Any: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def __A (self ) -> List[str]: super().test_save_load_local(expected_max_difference=5e-4 ) def __A (self ) -> Any: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase): @classmethod def __A (cls ) -> Tuple: super().setUpClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) @classmethod def __A (cls ) -> Any: super().tearDownClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) def __A (self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __A (self ) -> Optional[int]: _lowercase =torch.manual_seed(5_1 ) _lowercase =StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _lowercase ="""a painting of an elephant with glasses""" _lowercase =[5, 7] _lowercase =pipe( prompt=lowerCamelCase_ , token_indices=lowerCamelCase_ , guidance_scale=7.5 , generator=lowerCamelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _lowercase =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-1
5
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from"
    " diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
323
0
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary digits, returned as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    # build the binary digit string from least to most significant bit
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
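A couple of illustrative checks for the function above. Note that it returns the binary digits as a plain int (so 0xAC becomes 10101100), and a leading minus sign is carried over rather than encoded in two's complement.

assert hex_to_bin("AC") == 10101100    # 0xAC == 172 == 0b10101100
assert hex_to_bin("-0x10") == -10000   # sign preserved, not two's complement
assert hex_to_bin("8") == 1000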
196
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
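A few sanity checks for the bit helpers above; positions are zero-indexed from the least significant bit.

# 0b1101 == 13
assert set_bit(0b1101, 1) == 0b1111
assert clear_bit(0b1101, 2) == 0b1001
assert flip_bit(0b1101, 0) == 0b1100
assert is_bit_set(0b1101, 3) is True
assert get_bit(0b1101, 1) == 0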
323
0
'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # naive evaluation: sum of c_i * x**i over all coefficients
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's scheme: fold coefficients from highest to lowest degree,
    # using one multiply and one add per coefficient
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
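Both evaluators compute c0 + c1*x + ... + cn*x^n with coefficients given in ascending degree; Horner's rule just regroups it as (...((cn*x + c(n-1))*x + ...)*x + c0, avoiding explicit powers. A quick agreement check (exact float equality is avoided since the two orderings round differently):

poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 5x^2 + 9.3x^3 + 7x^4
x = 10.0
assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9
print(horner(poly, x))  # ~79800.0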
152
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
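# --- Added usage sketch (not part of the original test file) ---
# A condensed, hypothetical illustration of the cache pattern the two checks
# above exercise: pre-allocate the decoder cache with `init_cache`, run all but
# the last token once, then feed the final token with an explicit position id.
# The tiny config values below are illustrative assumptions, not test fixtures.
if __name__ == "__main__":
    import jax.numpy as jnp

    from transformers import FlaxGPTJForCausalLM, GPTJConfig

    tiny_config = GPTJConfig(vocab_size=99, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=8)
    tiny_model = FlaxGPTJForCausalLM(tiny_config)  # randomly initialized

    ids = jnp.ones((1, 8), dtype="i4")
    mask = jnp.ones((1, 20), dtype="i4")  # padded to the cache length, as in the checks above
    cache = tiny_model.init_cache(ids.shape[0], 20)
    pos = jnp.broadcast_to(jnp.arange(7)[None, :], (1, 7))

    first = tiny_model(ids[:, :-1], attention_mask=mask, past_key_values=cache, position_ids=pos)
    last = tiny_model(
        ids[:, -1:],
        attention_mask=mask,
        past_key_values=first.past_key_values,
        position_ids=jnp.array([[7]], dtype="i4"),
    )
    print(last.logits.shape)  # (1, 1, vocab_size)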
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
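# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical example of composing the feature types re-exported
# above into a dataset schema, assuming the public `datasets` API; the field
# names are illustrative only.
if __name__ == "__main__":
    from datasets import ClassLabel, Features, Sequence, Value

    schema = Features(
        {
            "text": Value("string"),
            "label": ClassLabel(names=["negative", "positive"]),
            "token_ids": Sequence(Value("int32")),
        }
    )
    print(schema)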
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
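# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical demo of the decoder above, assuming this module is
# diffusers' UniDiffuser text decoder and the import path below exists in the
# installed diffusers version; the prefix shape and eos token id are
# illustrative assumptions, not values from the original file.
if __name__ == "__main__":
    import torch

    from diffusers.pipelines.unidiffuser.modeling_text_decoder import UniDiffuserTextDecoder

    decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)  # inner dim matches n_embd
    clip_features = torch.randn(1, 77, 768)  # assumed (batch, prefix_length, dim) CLIP-style features
    tokens, lengths = decoder.generate_captions(clip_features, eos_token_id=50256, device="cpu")
    print(tokens.shape, lengths)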
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
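# --- Added usage sketch (not part of the original module) ---
# The `_LazyModule` indirection above resolves the names declared in
# `_import_structure` on first attribute access instead of at import time, so a
# consumer imports them as if they were defined eagerly. This assumes an
# installed `transformers`; the prints only exist to trigger the lazy resolution.
if __name__ == "__main__":
    from transformers.onnx import FeaturesManager, OnnxConfig  # resolved lazily on access

    print(OnnxConfig.__module__, FeaturesManager.__module__)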
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
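# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical example of building a scaled-down GIT configuration
# and reading the nested vision config through the public `transformers` API;
# the sizes are illustrative, not the pretrained defaults.
if __name__ == "__main__":
    from transformers import GitConfig

    config = GitConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(config.model_type, config.vision_config.model_type)
    print(config.to_dict()["vision_config"]["hidden_size"])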
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
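# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical example instantiating a scaled-down CANINE config via
# the public `transformers` API; CANINE is tokenizer-free, which is why the
# default bos/eos ids above (0xE000/0xE001) sit in Unicode's private-use area.
if __name__ == "__main__":
    from transformers import CanineConfig

    small = CanineConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    print(small.num_hash_buckets, hex(small.bos_token_id), hex(small.eos_token_id))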
from manim import *


# Animation of big-model inference with CPU offload: as the input reaches each
# layer, weights are moved from the CPU to the GPU and back. The class/stage
# name and the color constants (BLUE/RED/ORANGE) are restored by inference from
# the surrounding code and may differ from the original file.
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_6))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a),
            FadeOut(a_c, run_time=0.5),
        )

        step_8 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])

        self.play(Write(step_8, run_time=3), MoveToTarget(input))
        self.wait()
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool

# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so parsing the JSON file with the YAML
            # loader works here.
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) A = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='''relu''')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='''relu''')) classifier.add(layers.Dense(units=1, activation='''sigmoid''')) # Compiling the CNN classifier.compile( optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy'''] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') A = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) A = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) A = train_datagen.flow_from_directory( '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) A = test_datagen.flow_from_directory( '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('''cnn.h5''') # Part 3 - Making new predictions A = tf.keras.preprocessing.image.load_img( '''dataset/single_prediction/image.png''', target_size=(64, 64) ) A = tf.keras.preprocessing.image.img_to_array(test_image) A = np.expand_dims(test_image, axis=0) A = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: A = '''Normal''' if result[0][0] == 1: A = '''Abnormality detected'''
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # run the search so the timing below is meaningful
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str):
    """Update the stable version and the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
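# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical check of the ONNX export spec declared above. The
# import path for `EfficientNetOnnxConfig` is an assumption about where
# `transformers` keeps this class; the config values are the defaults.
if __name__ == "__main__":
    from transformers import EfficientNetConfig
    from transformers.models.efficientnet.configuration_efficientnet import EfficientNetOnnxConfig

    config = EfficientNetConfig()
    onnx_config = EfficientNetOnnxConfig(config)
    print(dict(onnx_config.inputs), onnx_config.atol_for_validation)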
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCamelCase ( lowercase_ , unittest.TestCase ): UpperCAmelCase_ = KandinskyVaaPipeline UpperCAmelCase_ = [ "image_embeds", "negative_image_embeds", ] UpperCAmelCase_ = ["image_embeds", "negative_image_embeds"] UpperCAmelCase_ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] UpperCAmelCase_ = False @property def snake_case_ (self ) -> str: return 32 @property def snake_case_ (self ) -> Any: return 32 @property def snake_case_ (self ) -> List[str]: return self.time_input_dim @property def snake_case_ (self ) -> List[str]: return self.time_input_dim * 4 @property def snake_case_ (self ) -> Any: return 1_00 @property def snake_case_ (self ) -> str: torch.manual_seed(0 ) UpperCamelCase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCamelCase = UNetaDConditionModel(**lowerCamelCase_ ) return model @property def snake_case_ (self ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case_ (self ) -> Tuple: torch.manual_seed(0 ) UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ (self ) -> Optional[int]: UpperCamelCase = self.dummy_unet UpperCamelCase = self.dummy_movq UpperCamelCase = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase_ , ) UpperCamelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def snake_case_ (self , __a , __a=0 ) -> Dict: UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCamelCase_ ) if str(lowerCamelCase_ ).startswith("mps" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) 
else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images UpperCamelCase = pipe( **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0] UpperCamelCase = image[0, -3:, -3:, -1] UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase = np.array( [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): def snake_case_ (self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ (self ) -> Dict: UpperCamelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) UpperCamelCase = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase_ ) UpperCamelCase = KandinskyVaaPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) UpperCamelCase = pipeline.to(lowerCamelCase_ ) pipeline.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = """red cat, 4k photo""" UpperCamelCase = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase = pipe_prior( lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase = pipeline( image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , output_type="np" , ) UpperCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
153
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git models set `input_ids = None` in `preprocess` when there is no prompt; in batch mode these get grouped
        # into a list of `None`, which `generate` cannot handle, so collapse that list back to `None` here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
323
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
322
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
323
0
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
5
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
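# --- Hedged usage sketch (editor's addition, not part of the module above) ---
# Inside the `datasets` library this reader backs `load_dataset("text", ...)` and
# `Dataset.from_text`; `read()` returns a dataset with a single "text" column, one
# row per input line. The import path and file name below are assumptions for
# illustration only.
if __name__ == "__main__":
    from pathlib import Path

    from datasets.io.text import TextDatasetReader

    Path("corpus.txt").write_text("first line\nsecond line\n")
    ds = TextDatasetReader("corpus.txt").read()
    print(ds[0])  # expected: {'text': 'first line'}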
323
0
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
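# --- Hedged usage sketch (editor's addition, not part of the module above) ---
# `dep_version_check` looks up the pinned requirement string for a package in
# `deps` and defers to `require_version`, so callers pass the bare package name.
# The hint text below is illustrative, not a string used by the library.
#
#   dep_version_check("numpy")  # checks installed numpy against deps["numpy"]
#   dep_version_check("tokenizers", hint="pip install tokenizers -U")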
196
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
0
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # returns (x, y) such that a * x + b * y == gcd(a, b)
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    # returns the unique x modulo n1 * n2 with x ≡ r1 (mod n1) and x ≡ r2 (mod n2),
    # assuming n1 and n2 are coprime
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    # returns the multiplicative inverse of a modulo n, assuming gcd(a, n) == 1
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    # same as chinese_remainder_theorem, but built on invert_modulo
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
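# --- Hedged worked example (editor's addition, assumes the functions above are in scope) ---
# With n1=5, r1=1, n2=7, r2=2, the unique solution modulo 35 of x ≡ 1 (mod 5) and
# x ≡ 2 (mod 7) is 16, and 5 is the inverse of 3 modulo 7.
def _editor_smoke_test() -> None:
    assert chinese_remainder_theorem(5, 1, 7, 2) == 16   # 16 % 5 == 1, 16 % 7 == 2
    assert chinese_remainder_theorem2(5, 1, 7, 2) == 16  # both constructions agree
    assert invert_modulo(3, 7) == 5                      # 3 * 5 == 15 ≡ 1 (mod 7)
    assert extended_euclid(10, 6) == (-1, 2)             # 10 * -1 + 6 * 2 == 2 == gcd(10, 6)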
152
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
323
0
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    # Defaulting **seed** to a function call is normally discouraged (ruff B008),
    # but here the wall clock is only read once per instantiation.
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        # modulo is the exclusive upper bound on generated values; powers of 2
        # (commonly 2**32) are the most efficient choices.
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        # Returns a value in [0, modulo - 1] and advances the internal state.
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
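# --- Hedged usage sketch (editor's addition, assumes the class above is in scope) ---
# Seeding explicitly makes the stream reproducible, which is why tests should not
# rely on the wall-clock default. The multiplier/increment above are the Numerical
# Recipes constants; by the Hull-Dobell theorem they give a full period of 2**32
# when modulo is 2**32 (i.e. 2 << 31).
def _editor_demo() -> None:
    a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]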
9
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
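# --- Hedged sketch (editor's addition, not part of the module above) ---
# The contract a concrete input stream must satisfy: stash source-specific state in
# __init__ and materialize the dataset in read(). `InMemoryListReader` is a
# hypothetical illustration, not a class that exists in `datasets`.
class InMemoryListReader(AbstractDatasetInputStream):
    def __init__(self, rows, **kwargs):
        super().__init__(**kwargs)
        self.rows = rows  # e.g. [{"text": "hello"}, {"text": "world"}]

    def read(self) -> Dataset:
        return Dataset.from_list(self.rows)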
323
0
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    # require_version wrapper which emits a core-specific hint on failure
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
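# --- Hedged usage sketch (editor's addition, not part of the module above) ---
# The requirement grammar accepted by `require_version`: each call raises
# ImportError/PackageNotFoundError on mismatch and returns None on success, so
# these lines only run cleanly if the named packages are actually installed.
#
#   require_version("tokenizers")        # bare name: presence check only
#   require_version("numpy>=1.17")       # single version constraint
#   require_version("tqdm>=4.27,<5.0")   # comma-separated multi-constraint
#   require_version("python>=3.8")       # special-cased against sys.version_info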
257
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
0
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
331
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
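As a sanity check, the accuracy helper this script wires into compute_metrics (defined above under an obfuscated name but invoked as simple_accuracy) is plain mean agreement:

import numpy as np

preds = np.array([0, 2, 1, 1])
labels = np.array([0, 2, 0, 1])
print((preds == labels).mean())  # 0.75, exactly what simple_accuracy returns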
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: video frames are folded into the batch axis
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: fold spatial positions back out and restore the input layout
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
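A quick shape sanity check for the forward pass (a sketch; two clips of eight frames folded into the batch axis, as the reshapes above assume):

import torch

model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
frames = torch.randn(2 * 8, 256, 16, 16)  # (batch * num_frames, channels, height, width)
out = model(frames, num_frames=8)
print(out.sample.shape)  # torch.Size([16, 256, 16, 16]), residual-added, same shape as input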
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
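A minimal sketch of the reparameterised sampling implemented by this last class. Its name is obfuscated in this dump; DiagonalGaussianDistribution, and the sample/kl method names, are assumptions based on the diffusers API the method bodies suggest. `parameters` packs mean and log-variance along dim=1, so 8 input channels yield a 4-channel latent:

import torch

params = torch.randn(1, 8, 4, 4)  # 4 mean channels + 4 logvar channels
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample(generator=torch.manual_seed(0))  # mean + std * unit noise
print(z.shape)         # torch.Size([1, 4, 4, 4])
print(posterior.kl())  # per-example KL divergence against a standard normal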
"""simple docstring""" import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class __lowercase ( lowercase_ ): '''simple docstring''' __lowerCAmelCase = '''facebook/bart-large-mnli''' __lowerCAmelCase = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) __lowerCAmelCase = '''text_classifier''' __lowerCAmelCase = AutoTokenizer __lowerCAmelCase = AutoModelForSequenceClassification __lowerCAmelCase = ['''text''', ['''text''']] __lowerCAmelCase = ['''text'''] def _lowerCamelCase ( self ): super().setup() __a : List[str] = self.model.config __a : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __a : List[str] = int(lowerCamelCase_ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : Tuple = labels return self.pre_processor( [text] * len(lowerCamelCase_ ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : Any = outputs.logits __a : int = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
'''simple docstring'''
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=30 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=0.6 , UpperCAmelCase=None , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = mask_ratio _UpperCAmelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = ViTMAEModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _UpperCAmelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = ViTMAEForPreTraining(lowerCamelCase_ ) 
model.to(lowerCamelCase_ ) model.eval() _UpperCAmelCase = model(lowerCamelCase_ ) _UpperCAmelCase = (self.image_size // self.patch_size) ** 2 _UpperCAmelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = ViTMAEForPreTraining(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(lowerCamelCase_ ) _UpperCAmelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () UpperCamelCase__ = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = ViTMAEModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def UpperCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def UpperCamelCase ( self ): """simple docstring""" pass def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(lowerCamelCase_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" np.random.seed(2 ) _UpperCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _UpperCAmelCase = torch.from_numpy(lowerCamelCase_ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _UpperCAmelCase = pt_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) _UpperCAmelCase = outputs[0].cpu().numpy() _UpperCAmelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ ) _UpperCAmelCase = model_class.from_pretrained(lowerCamelCase_ ) model.to(lowerCamelCase_ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) # Make sure we don't have nans _UpperCAmelCase = after_outputs[0].cpu().numpy() _UpperCAmelCase = 0 _UpperCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1e-5 ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase ( self ): """simple docstring""" pass @slow def UpperCamelCase ( self ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = ViTMAEModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __A ( )-> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCamelCase ( unittest.TestCase): """simple docstring""" @cached_property def UpperCamelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def UpperCamelCase ( self ): """simple docstring""" np.random.seed(2 ) _UpperCAmelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(lowerCamelCase_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _UpperCAmelCase = ViTMAEConfig() _UpperCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _UpperCAmelCase = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**lowerCamelCase_ , noise=torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ ) ) # verify the logits _UpperCAmelCase = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _UpperCAmelCase = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase_ ) , atol=1e-4 ) )
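For intuition, the expected sequence length the tester computes above is simple arithmetic over the mask ratio; with the defaults used here:

import math

num_patches = (30 // 2) ** 2  # image_size=30, patch_size=2 -> 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))  # keep ~40% of 226 tokens (incl. [CLS])
print(seq_length)  # 91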
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
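Outside the test harness, the same pipeline is used like this (a sketch mirroring the slow test above; it downloads the Intel/dpt-large checkpoint):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")       # a PIL.Image, as the assertions above expect
print(outputs["predicted_depth"].shape)  # torch.Tensor of raw depth predictions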
"""simple docstring""" import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] lowerCAmelCase__ = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = { """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCamelCase = int(re.match(r".*layer_(\d*).*" , lowerCamelCase_ )[1] ) layer_number -= 3 return F"h.{layer_number}." + key def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" if dtype == torch.bool: return 1 / 8 UpperCamelCase = re.search(r"[^\d](\d+)$" , str(lowerCamelCase_ ) ) if bit_search is None: raise ValueError(F"`dtype` is not a valid dtype: {dtype}." ) UpperCamelCase = int(bit_search.groups()[0] ) return bit_size // 8 def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" if bloom_config_file == "": UpperCamelCase = BloomConfig() else: UpperCamelCase = BloomConfig.from_json_file(lowerCamelCase_ ) if shard_model: UpperCamelCase = os.listdir(lowerCamelCase_ ) UpperCamelCase = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith("layer" ) and "model_00" in s , lowerCamelCase_ ) ) UpperCamelCase = {"""weight_map""": {}, """metadata""": {}} UpperCamelCase = 0 UpperCamelCase = None UpperCamelCase = BloomConfig() for j, file in enumerate(lowerCamelCase_ ): print("Processing file: {}".format(lowerCamelCase_ ) ) UpperCamelCase = None for i in range(lowerCamelCase_ ): # load all TP files UpperCamelCase = file.replace("model_00" , F"model_0{i}" ) UpperCamelCase = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location="cpu" ) # Rename keys in the transformers names UpperCamelCase = list(temp.keys() ) for key in keys: UpperCamelCase = temp.pop(lowerCamelCase_ ) if tensors is None: UpperCamelCase = temp else: for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCamelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCamelCase = tensors[key] / pretraining_tp 
torch.save( lowerCamelCase_ , os.path.join( lowerCamelCase_ , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCamelCase = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCamelCase = """pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) UpperCamelCase = BloomConfig() UpperCamelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME UpperCamelCase = total_size with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) with open(os.path.join(lowerCamelCase_ , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f: UpperCamelCase = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n""" f.write(lowerCamelCase_ ) else: UpperCamelCase = BloomModel(lowerCamelCase_ ) UpperCamelCase = os.listdir(lowerCamelCase_ ) UpperCamelCase = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith("layer" ) and "model_00" in s , lowerCamelCase_ ) ) UpperCamelCase = None for i, file in enumerate(lowerCamelCase_ ): UpperCamelCase = None for i in range(lowerCamelCase_ ): # load all TP files UpperCamelCase = file.replace("model_00" , F"model_0{i}" ) UpperCamelCase = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location="cpu" ) # Rename keys in the transformers names UpperCamelCase = list(temp.keys() ) for key in keys: UpperCamelCase = temp.pop(lowerCamelCase_ ) if tensors is None: UpperCamelCase = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCamelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCamelCase = tensors[key] / pretraining_tp UpperCamelCase = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected" if missing_keys is None: UpperCamelCase = set(other_keys.missing_keys ) else: UpperCamelCase = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F"The keys {missing_keys} are missing" # Save pytorch-model os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) UpperCamelCase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME UpperCamelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" ) if config.torch_dtype is not None: UpperCamelCase = model.to(config.torch_dtype ) torch.save(model.state_dict() , lowerCamelCase_ ) print(F"Save configuration file to {pytorch_config_dump_path}" ) with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( 
'''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) lowerCAmelCase__ = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
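For reference, the byte-size helper the sharding logic calls as get_dtype_size (its definition above carries an obfuscated name) behaves like this:

import torch

print(get_dtype_size(torch.float16))  # 2 bytes per element ("16" parsed from the dtype name)
print(get_dtype_size(torch.bool))     # 0.125, i.e. 1/8 byte per element
t = torch.zeros(1024, dtype=torch.float16)
print(t.numel() * get_dtype_size(t.dtype))  # 2048 bytes, the quantity added to total_size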
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
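A small hedged sketch of what these assertions exercise: resizing an arbitrary image down to the configured 18x18 target and batching it:

import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
img = Image.fromarray((np.random.rand(30, 30, 3) * 255).astype(np.uint8))
pixel_values = processor(img, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])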
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class A_ ( lowercase_ ):
    _lowercase : List[Any] = (CMStochasticIterativeScheduler,)
    _lowercase : Union[str, Any] = 1_0

    def UpperCAmelCase ( self : List[str] , **UpperCAmelCase : int ) -> Any:
        __lowerCAmelCase: Dict = {
            "num_train_timesteps": 2_0_1,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**lowerCamelCase_ )
        return config

    def UpperCAmelCase ( self : str ) -> int:
        __lowerCAmelCase: Dict = 1_0
        __lowerCAmelCase: Union[str, Any] = self.get_scheduler_config()
        __lowerCAmelCase: int = self.scheduler_classes[0](**lowerCamelCase_ )

        scheduler.set_timesteps(lowerCamelCase_ )

        __lowerCAmelCase: List[str] = scheduler.timesteps[0]
        __lowerCAmelCase: Dict = scheduler.timesteps[1]

        __lowerCAmelCase: Optional[Any] = self.dummy_sample
        __lowerCAmelCase: List[str] = 0.1 * sample

        __lowerCAmelCase: Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
        __lowerCAmelCase: Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample

        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def UpperCAmelCase ( self : List[Any] ) -> List[str]:
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase_ )

    def UpperCAmelCase ( self : Union[str, Any] ) -> int:
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=lowerCamelCase_ )

    def UpperCAmelCase ( self : List[str] ) -> str:
        __lowerCAmelCase: str = self.scheduler_classes[0]
        __lowerCAmelCase: Optional[Any] = self.get_scheduler_config()
        __lowerCAmelCase: List[str] = scheduler_class(**lowerCamelCase_ )

        __lowerCAmelCase: Dict = 1
        scheduler.set_timesteps(lowerCamelCase_ )
        __lowerCAmelCase: int = scheduler.timesteps

        __lowerCAmelCase: str = torch.manual_seed(0 )

        __lowerCAmelCase: Any = self.dummy_model()
        __lowerCAmelCase: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(lowerCamelCase_ ):
            # 1. scale model input
            __lowerCAmelCase: Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )

            # 2. predict noise residual
            __lowerCAmelCase: Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )

            # 3. predict previous sample x_t-1
            __lowerCAmelCase: List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample

            __lowerCAmelCase: Union[str, Any] = pred_prev_sample

        __lowerCAmelCase: Any = torch.sum(torch.abs(lowerCamelCase_ ) )
        __lowerCAmelCase: Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )

        assert abs(result_sum.item() - 192.7614 ) < 1E-2
        assert abs(result_mean.item() - 0.2510 ) < 1E-3

    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        __lowerCAmelCase: List[Any] = self.scheduler_classes[0]
        __lowerCAmelCase: Tuple = self.get_scheduler_config()
        __lowerCAmelCase: int = scheduler_class(**lowerCamelCase_ )

        __lowerCAmelCase: Optional[int] = [1_0_6, 0]
        scheduler.set_timesteps(timesteps=lowerCamelCase_ )
        __lowerCAmelCase: Tuple = scheduler.timesteps

        __lowerCAmelCase: Tuple = torch.manual_seed(0 )

        __lowerCAmelCase: Any = self.dummy_model()
        __lowerCAmelCase: List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            __lowerCAmelCase: Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )

            # 2. predict noise residual
            __lowerCAmelCase: Any = model(lowerCamelCase_ , lowerCamelCase_ )

            # 3. predict previous sample x_t-1
            __lowerCAmelCase: str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample

            __lowerCAmelCase: Dict = pred_prev_sample

        __lowerCAmelCase: Any = torch.sum(torch.abs(lowerCamelCase_ ) )
        __lowerCAmelCase: Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )

        assert abs(result_sum.item() - 347.6357 ) < 1E-2
        assert abs(result_mean.item() - 0.4527 ) < 1E-3

    def UpperCAmelCase ( self : Tuple ) -> Dict:
        __lowerCAmelCase: List[str] = self.scheduler_classes[0]
        __lowerCAmelCase: Optional[int] = self.get_scheduler_config()
        __lowerCAmelCase: Any = scheduler_class(**lowerCamelCase_ )

        __lowerCAmelCase: Any = [3_9, 3_0, 1_2, 1_5, 0]

        with self.assertRaises(lowerCamelCase_ , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=lowerCamelCase_ )

    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        __lowerCAmelCase: str = self.scheduler_classes[0]
        __lowerCAmelCase: Dict = self.get_scheduler_config()
        __lowerCAmelCase: Optional[int] = scheduler_class(**lowerCamelCase_ )

        __lowerCAmelCase: int = [3_9, 3_0, 1_2, 1, 0]
        __lowerCAmelCase: Optional[Any] = len(lowerCamelCase_ )

        with self.assertRaises(lowerCamelCase_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )

    def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        __lowerCAmelCase: List[str] = self.scheduler_classes[0]
        __lowerCAmelCase: Any = self.get_scheduler_config()
        __lowerCAmelCase: int = scheduler_class(**lowerCamelCase_ )

        __lowerCAmelCase: List[str] = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            lowerCamelCase_ ,
            msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' ,
        ):
            scheduler.set_timesteps(timesteps=lowerCamelCase_ )
322
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


__UpperCAmelCase = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
323
0
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


UpperCAmelCase__ = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCAmelCase__ = 'https://storage.googleapis.com/cvdf-datasets/mnist/'


def UpperCAmelCase_ ( __snake_case ) -> Union[str, Any]:
    """simple docstring"""
    _lowercase =numpy.dtype(numpy.uintaa ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCamelCase_ )[0]


@deprecated(lowerCamelCase_ , 'Please use tf.data to implement this functionality.' )
def UpperCAmelCase_ ( __snake_case ) -> Tuple:
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
        _lowercase =_readaa(lowerCamelCase_ )
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        _lowercase =_readaa(lowerCamelCase_ )
        _lowercase =_readaa(lowerCamelCase_ )
        _lowercase =_readaa(lowerCamelCase_ )
        _lowercase =bytestream.read(rows * cols * num_images )
        _lowercase =numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
        _lowercase =data.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 1 )
        return data


@deprecated(lowerCamelCase_ , 'Please use tf.one_hot on tensors.' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> List[Any]:
    """simple docstring"""
    _lowercase =labels_dense.shape[0]
    _lowercase =numpy.arange(lowerCamelCase_ ) * num_classes
    _lowercase =numpy.zeros((num_labels, num_classes) )
    _lowercase =1
    return labels_one_hot


@deprecated(lowerCamelCase_ , 'Please use tf.data to implement this functionality.' )
def UpperCAmelCase_ ( __snake_case , __snake_case=False , __snake_case=10 ) -> Optional[Any]:
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
        _lowercase =_readaa(lowerCamelCase_ )
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        _lowercase =_readaa(lowerCamelCase_ )
        _lowercase =bytestream.read(lowerCamelCase_ )
        _lowercase =numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
        if one_hot:
            return _dense_to_one_hot(lowerCamelCase_ , lowerCamelCase_ )
        return labels


class lowerCamelCase__ :
    @deprecated(
        lowerCamelCase_ ,
        'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' ,
    )
    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=None , ) -> int:
        _lowercase =random_seed.get_seed(lowerCamelCase_ )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        _lowercase =dtypes.as_dtype(lowerCamelCase_ ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            _lowercase =1_0_0_0_0
            _lowercase =one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            _lowercase =images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                _lowercase =images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                _lowercase =images.astype(numpy.floataa )
                _lowercase =numpy.multiply(lowerCamelCase_ , 1.0 / 255.0 )
        _lowercase =images
        _lowercase =labels
        _lowercase =0
        _lowercase =0

    @property
    def __A (self ) -> Union[str, Any]:
        return self._images

    @property
    def __A (self ) -> Optional[int]:
        return self._labels

    @property
    def __A (self ) -> Union[str, Any]:
        return self._num_examples

    @property
    def __A (self ) -> List[Any]:
        return self._epochs_completed

    def __A (self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True ) -> List[str]:
        if fake_data:
            _lowercase =[1] * 7_8_4
            _lowercase =[1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(lowerCamelCase_ )],
                [fake_label for _ in range(lowerCamelCase_ )],
            )
        _lowercase =self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            _lowercase =numpy.arange(self._num_examples )
            numpy.random.shuffle(lowerCamelCase_ )
            _lowercase =self.images[perma]
            _lowercase =self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            _lowercase =self._num_examples - start
            _lowercase =self._images[start : self._num_examples]
            _lowercase =self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                _lowercase =numpy.arange(self._num_examples )
                numpy.random.shuffle(lowerCamelCase_ )
                _lowercase =self.images[perm]
                _lowercase =self.labels[perm]
            # Start next epoch
            _lowercase =0
            _lowercase =batch_size - rest_num_examples
            _lowercase =self._index_in_epoch
            _lowercase =self._images[start:end]
            _lowercase =self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            _lowercase =self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(lowerCamelCase_ , 'Please write your own downloading logic.' )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Tuple:
    """simple docstring"""
    if not gfile.Exists(lowerCamelCase_ ):
        gfile.MakeDirs(lowerCamelCase_ )
    _lowercase =os.path.join(lowerCamelCase_ , lowerCamelCase_ )
    if not gfile.Exists(lowerCamelCase_ ):
        urllib.request.urlretrieve(lowerCamelCase_ , lowerCamelCase_ )  # noqa: S310
        with gfile.GFile(lowerCamelCase_ ) as f:
            _lowercase =f.size()
        print('Successfully downloaded' , lowerCamelCase_ , lowerCamelCase_ , 'bytes.' )
    return filepath


@deprecated(
    lowerCamelCase_ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def UpperCAmelCase_ ( __snake_case , __snake_case=False , __snake_case=False , __snake_case=dtypes.floataa , __snake_case=True , __snake_case=5000 , __snake_case=None , __snake_case=DEFAULT_SOURCE_URL , ) -> int:
    """simple docstring"""
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ )

        _lowercase =fake()
        _lowercase =fake()
        _lowercase =fake()
        return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )

    if not source_url:  # empty string check
        _lowercase =DEFAULT_SOURCE_URL

    _lowercase ="train-images-idx3-ubyte.gz"
    _lowercase ="train-labels-idx1-ubyte.gz"
    _lowercase ="t10k-images-idx3-ubyte.gz"
    _lowercase ="t10k-labels-idx1-ubyte.gz"

    _lowercase =_maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file )
    with gfile.Open(lowerCamelCase_ , 'rb' ) as f:
        _lowercase =_extract_images(lowerCamelCase_ )

    _lowercase =_maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file )
    with gfile.Open(lowerCamelCase_ , 'rb' ) as f:
        _lowercase =_extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )

    _lowercase =_maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file )
    with gfile.Open(lowerCamelCase_ , 'rb' ) as f:
        _lowercase =_extract_images(lowerCamelCase_ )

    _lowercase =_maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file )
    with gfile.Open(lowerCamelCase_ , 'rb' ) as f:
        _lowercase =_extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )

    if not 0 <= validation_size <= len(lowerCamelCase_ ):
        _lowercase =(
            "Validation size should be between 0 and "
            f"{len(lowerCamelCase_ )}. Received: {validation_size}."
        )
        raise ValueError(lowerCamelCase_ )

    _lowercase =train_images[:validation_size]
    _lowercase =train_labels[:validation_size]
    _lowercase =train_images[validation_size:]
    _lowercase =train_labels[validation_size:]

    _lowercase ={"dtype": dtype, "reshape": reshape, "seed": seed}

    _lowercase =_DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
    _lowercase =_DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
    _lowercase =_DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )

    return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
5
'''simple docstring'''

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
323
0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def snake_case_ ( snake_case ) -> Union[str, Any]:
    lowercase__: str = 3_84
    lowercase__: Union[str, Any] = 7
    if "tiny" in model_name:
        lowercase__: List[str] = 96
        lowercase__: List[str] = (2, 2, 6, 2)
        lowercase__: List[Any] = (3, 6, 12, 24)
    elif "small" in model_name:
        lowercase__: Any = 96
        lowercase__: List[str] = (2, 2, 18, 2)
        lowercase__: int = (3, 6, 12, 24)
    elif "base" in model_name:
        lowercase__: int = 1_28
        lowercase__: Any = (2, 2, 18, 2)
        lowercase__: int = (4, 8, 16, 32)
        lowercase__: Optional[Any] = 12
        lowercase__: str = 5_12
    elif "large" in model_name:
        lowercase__: Tuple = 1_92
        lowercase__: Tuple = (2, 2, 18, 2)
        lowercase__: List[str] = (6, 12, 24, 48)
        lowercase__: Tuple = 12
        lowercase__: Union[str, Any] = 7_68

    # set label information
    lowercase__: List[str] = 1_50
    lowercase__: Optional[Any] = "huggingface/label-files"
    lowercase__: List[str] = "ade20k-id2label.json"
    lowercase__: Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
    lowercase__: str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
    lowercase__: int = {v: k for k, v in idalabel.items()}

    lowercase__: Optional[Any] = SwinConfig(
        embed_dim=lowerCamelCase_ ,
        depths=lowerCamelCase_ ,
        num_heads=lowerCamelCase_ ,
        window_size=lowerCamelCase_ ,
        out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,
    )
    lowercase__: List[str] = UperNetConfig(
        backbone_config=lowerCamelCase_ ,
        auxiliary_in_channels=lowerCamelCase_ ,
        num_labels=lowerCamelCase_ ,
        idalabel=lowerCamelCase_ ,
        labelaid=lowerCamelCase_ ,
    )

    return config


def snake_case_ ( snake_case ) -> Tuple:
    lowercase__: List[str] = []

    # fmt: off
    # stem
    rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
    rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
    rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )

        if i < 3:
            rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
        rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
        rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )

    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ] )
    # fmt: on

    return rename_keys


def snake_case_ ( snake_case , snake_case , snake_case ) -> List[str]:
    lowercase__: int = dct.pop(lowerCamelCase_ )
    lowercase__: List[Any] = val


def snake_case_ ( snake_case , snake_case ) -> Any:
    lowercase__: Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowercase__: Dict = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            lowercase__: Union[str, Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
            lowercase__: Union[str, Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            lowercase__: int = in_proj_weight[:dim, :]
            lowercase__: Optional[int] = in_proj_bias[: dim]
            lowercase__: Union[str, Any] = in_proj_weight[
                dim : dim * 2, :
            ]
            lowercase__: Any = in_proj_bias[
                dim : dim * 2
            ]
            lowercase__: List[Any] = in_proj_weight[
                -dim :, :
            ]
            lowercase__: str = in_proj_bias[-dim :]
            # fmt: on


def snake_case_ ( snake_case ) -> Any:
    lowercase__: Tuple = x.shape
    lowercase__: Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
    lowercase__: Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
    return x


def snake_case_ ( snake_case ) -> int:
    lowercase__: Dict = x.shape
    lowercase__: Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
    lowercase__: str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
    return x


def snake_case_ ( snake_case ) -> Any:
    lowercase__: str = x.shape[0]
    lowercase__: List[str] = x.reshape(4 , in_channel // 4 )
    lowercase__: str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
    return x


def snake_case_ ( snake_case ) -> List[Any]:
    lowercase__: Tuple = x.shape[0]
    lowercase__: Optional[int] = x.reshape(in_channel // 4 , 4 )
    lowercase__: str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
    return x


def snake_case_ ( snake_case , snake_case , snake_case ) -> str:
    lowercase__: Dict = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    lowercase__: List[str] = model_name_to_url[model_name]
    lowercase__: Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' , file_name=lowerCamelCase_ )[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(lowerCamelCase_ , param.shape )

    lowercase__: Dict = get_upernet_config(lowerCamelCase_ )
    lowercase__: Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowercase__: Union[str, Any] = state_dict.pop(lowerCamelCase_ )
        if "bn" in key:
            lowercase__: List[str] = key.replace('bn' , 'batch_norm' )
        lowercase__: Optional[Any] = val

    # rename keys
    lowercase__: Union[str, Any] = create_rename_keys(lowerCamelCase_ )
    for src, dest in rename_keys:
        rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    read_in_q_k_v(lowerCamelCase_ , config.backbone_config )

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                lowercase__: Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
            if "norm" in key:
                lowercase__: Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )

    model.load_state_dict(lowerCamelCase_ )

    # verify on image
    lowercase__: Optional[int] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    lowercase__: Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' )

    lowercase__: Optional[int] = SegformerImageProcessor()
    lowercase__: str = processor(lowerCamelCase_ , return_tensors='pt' ).pixel_values

    with torch.no_grad():
        lowercase__: List[str] = model(lowerCamelCase_ )
        lowercase__: Dict = outputs.logits

    print(logits.shape )
    print('First values of logits:' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        lowercase__: Optional[Any] = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
    elif model_name == "upernet-swin-small":
        lowercase__: Optional[Any] = torch.tensor(
            [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
    elif model_name == "upernet-swin-base":
        lowercase__: str = torch.tensor(
            [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
    elif model_name == "upernet-swin-large":
        lowercase__: str = torch.tensor(
            [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(lowerCamelCase_ )
        print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(lowerCamelCase_ )

    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub' )
        model.push_to_hub(f'openmmlab/{model_name}' )
        processor.push_to_hub(f'openmmlab/{model_name}' )


if __name__ == "__main__":
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='upernet-swin-tiny',
        type=str,
        choices=[F'upernet-swin-{size}' for size in ['tiny', 'small', 'base', 'large']],
        help='Name of the Swin + UperNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    __lowerCAmelCase = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
196
'''simple docstring'''


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return number | (1 << position)


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return number & ~(1 << position)


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return number ^ (1 << position)


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return ((number >> position) & 1) == 1


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
323
0
'''simple docstring'''

import mpmath  # for roots of unity
import numpy as np


class __SCREAMING_SNAKE_CASE :
    def __init__( self : Optional[Any] , __lowercase : List[Any]=None , __lowercase : Tuple=None ) -> Any:
        SCREAMING_SNAKE_CASE__ : Any =list(poly_a or [0] )[:]
        SCREAMING_SNAKE_CASE__ : Optional[int] =list(poly_b or [0] )[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        SCREAMING_SNAKE_CASE__ : Tuple =len(self.polyA )

        while self.polyB[-1] == 0:
            self.polyB.pop()
        SCREAMING_SNAKE_CASE__ : List[str] =len(self.polyB )

        # Add 0 to make lengths equal a power of 2
        SCREAMING_SNAKE_CASE__ : Dict =int(
            2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )

        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )

        # The product
        SCREAMING_SNAKE_CASE__ : Optional[int] =self.__multiply()

    def __magic_name__ ( self : int , __lowercase : Optional[Any] ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__ : Tuple =[[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(lowerCamelCase_ ) <= 1:
            return dft[0]
        #
        SCREAMING_SNAKE_CASE__ : Dict =self.c_max_length // 2
        while next_ncol > 0:
            SCREAMING_SNAKE_CASE__ : Optional[Any] =[[] for i in range(lowerCamelCase_ )]
            SCREAMING_SNAKE_CASE__ : Dict =self.root**next_ncol

            # First half of next step
            SCREAMING_SNAKE_CASE__ : int =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(lowerCamelCase_ ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            SCREAMING_SNAKE_CASE__ : str =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(lowerCamelCase_ ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            SCREAMING_SNAKE_CASE__ : List[str] =new_dft
            SCREAMING_SNAKE_CASE__ : List[str] =next_ncol // 2
        return dft[0]

    def __magic_name__ ( self : Any ) -> str:
        SCREAMING_SNAKE_CASE__ : List[Any] =self.__dft('A' )
        SCREAMING_SNAKE_CASE__ : Any =self.__dft('B' )
        SCREAMING_SNAKE_CASE__ : int =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        SCREAMING_SNAKE_CASE__ : int =2
        while next_ncol <= self.c_max_length:
            SCREAMING_SNAKE_CASE__ : Union[str, Any] =[[] for i in range(lowerCamelCase_ )]
            SCREAMING_SNAKE_CASE__ : List[Any] =self.root ** (next_ncol // 2)
            SCREAMING_SNAKE_CASE__ : Optional[int] =1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            SCREAMING_SNAKE_CASE__ : Optional[Any] =new_inverse_c
            next_ncol *= 2
        # Unpack
        SCREAMING_SNAKE_CASE__ : Optional[int] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__( self : List[Any] ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__ : Optional[Any] ="A = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A] ) )
        SCREAMING_SNAKE_CASE__ : str ="B = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B] ) )
        SCREAMING_SNAKE_CASE__ : List[str] ="A*B = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.product ) )

        return F"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
152
'''simple docstring'''

import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = parent
        SCREAMING_SNAKE_CASE : Optional[int] = batch_size
        SCREAMING_SNAKE_CASE : Any = seq_length
        SCREAMING_SNAKE_CASE : List[str] = is_training
        SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
        SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
        SCREAMING_SNAKE_CASE : str = vocab_size
        SCREAMING_SNAKE_CASE : str = hidden_size
        SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
        SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
        SCREAMING_SNAKE_CASE : int = intermediate_size
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
        SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE : Tuple = initializer_range
        SCREAMING_SNAKE_CASE : Optional[int] = None
        SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
        SCREAMING_SNAKE_CASE : str = vocab_size - 1
        SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        SCREAMING_SNAKE_CASE : Optional[Any] = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )

        SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
            vocab_size=self.vocab_size ,
            n_embd=self.hidden_size ,
            n_layer=self.num_hidden_layers ,
            n_head=self.num_attention_heads ,
            n_positions=self.max_position_embeddings ,
            use_cache=lowerCamelCase_ ,
            bos_token_id=self.bos_token_id ,
            eos_token_id=self.eos_token_id ,
            pad_token_id=self.pad_token_id ,
            rotary_dim=self.rotary_dim ,
        )

        return (config, input_ids, input_mask)

    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = 20
        SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )

        SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        SCREAMING_SNAKE_CASE : Any = model(
            input_ids[:, :-1] ,
            attention_mask=lowerCamelCase_ ,
            past_key_values=lowerCamelCase_ ,
            position_ids=lowerCamelCase_ ,
        )

        SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        SCREAMING_SNAKE_CASE : str = model(
            input_ids[:, -1:] ,
            attention_mask=lowerCamelCase_ ,
            past_key_values=outputs_cache.past_key_values ,
            position_ids=lowerCamelCase_ ,
        )

        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = 20
        SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,
            axis=-1 ,
        )

        SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )

        SCREAMING_SNAKE_CASE : Any = model(
            input_ids[:, :-1] ,
            attention_mask=lowerCamelCase_ ,
            past_key_values=lowerCamelCase_ ,
            position_ids=lowerCamelCase_ ,
        )
        SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        SCREAMING_SNAKE_CASE : Dict = model(
            input_ids[:, -1:] ,
            past_key_values=outputs_cache.past_key_values ,
            attention_mask=lowerCamelCase_ ,
            position_ids=lowerCamelCase_ ,
        )

        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )


@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )

    def lowerCamelCase_ ( self : Any ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    @tooslow
    def lowerCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        SCREAMING_SNAKE_CASE : int = False
        SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id

        SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )

        SCREAMING_SNAKE_CASE : str = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences

        SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : List[Any] = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

    @is_pt_flax_cross_test
    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )

                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["input_ids"].shape
                SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(lowerCamelCase_ ):
                    SCREAMING_SNAKE_CASE : int = 0
                    SCREAMING_SNAKE_CASE : Optional[int] = 1
                    SCREAMING_SNAKE_CASE : List[Any] = 0
                    SCREAMING_SNAKE_CASE : Union[str, Any] = 1

                SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
                SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )

                SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : Any = fx_state

                with torch.no_grad():
                    SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()

                SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(lowerCamelCase_ )
                    SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )

                SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(
                    len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @is_pt_flax_cross_test
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )

                SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
                SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )

                SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )

                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["input_ids"].shape
                SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(lowerCamelCase_ ):
                    SCREAMING_SNAKE_CASE : Union[str, Any] = 0
                    SCREAMING_SNAKE_CASE : Dict = 1
                    SCREAMING_SNAKE_CASE : Dict = 0
                    SCREAMING_SNAKE_CASE : Tuple = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()

                SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
                self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(lowerCamelCase_ )
                    SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )

                with torch.no_grad():
                    SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()

                self.assertEqual(
                    len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @tooslow
    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCamelCase_ )
323
0
from itertools import zip_longest

import requests
from bsa import BeautifulSoup
from pandas import DataFrame


def _UpperCamelCase ( lowercase__ = "laptop" ):
    __SCREAMING_SNAKE_CASE : List[str] = F'https://www.amazon.in/laptop/s?k={product}'
    __SCREAMING_SNAKE_CASE : int = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    __SCREAMING_SNAKE_CASE : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).text )
    # Initialize a Pandas dataframe with the column titles
    __SCREAMING_SNAKE_CASE : Any = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div' ,
            attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} ,
        ) ,
        soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) ,
    ):
        try:
            __SCREAMING_SNAKE_CASE : int = item.ha.text
            __SCREAMING_SNAKE_CASE : Tuple = "https://www.amazon.in/" + item.ha.a["href"]
            __SCREAMING_SNAKE_CASE : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                __SCREAMING_SNAKE_CASE : Any = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                __SCREAMING_SNAKE_CASE : str = "Not available"
            try:
                __SCREAMING_SNAKE_CASE : Any = (
                    "₹"
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                __SCREAMING_SNAKE_CASE : Any = ""
            try:
                __SCREAMING_SNAKE_CASE : str = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                __SCREAMING_SNAKE_CASE : Tuple = float('nan' )
        except AttributeError:
            pass
        __SCREAMING_SNAKE_CASE : int = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __SCREAMING_SNAKE_CASE : Optional[int] = " "
        __SCREAMING_SNAKE_CASE : Tuple = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    __lowerCAmelCase : str = 'headphones'
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
9
'''simple docstring'''

from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']

    @register_to_config
    def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
        '''simple docstring'''
        super().__init__()

        SCREAMING_SNAKE_CASE : Optional[int] = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
                f' `n_embd`: {n_embd} are not equal.' )

        SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
        SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim

        SCREAMING_SNAKE_CASE : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        SCREAMING_SNAKE_CASE : str = (
            nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        SCREAMING_SNAKE_CASE : Any = GPTaConfig(
            vocab_size=lowerCamelCase_ ,
            n_positions=lowerCamelCase_ ,
            n_embd=lowerCamelCase_ ,
            n_layer=lowerCamelCase_ ,
            n_head=lowerCamelCase_ ,
            n_inner=lowerCamelCase_ ,
            activation_function=lowerCamelCase_ ,
            resid_pdrop=lowerCamelCase_ ,
            embd_pdrop=lowerCamelCase_ ,
            attn_pdrop=lowerCamelCase_ ,
            layer_norm_epsilon=lowerCamelCase_ ,
            initializer_range=lowerCamelCase_ ,
            scale_attn_weights=lowerCamelCase_ ,
            use_cache=lowerCamelCase_ ,
            scale_attn_by_inverse_layer_idx=lowerCamelCase_ ,
            reorder_and_upcast_attn=lowerCamelCase_ ,
        )
        SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
        SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
        '''simple docstring'''
        return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )

    def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
        '''simple docstring'''
        return self.encode_prefix(lowerCamelCase_ )

    @torch.no_grad()
    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
        SCREAMING_SNAKE_CASE : Dict = []
        SCREAMING_SNAKE_CASE : Tuple = []
        for feature in features:
            SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) )  # back to the clip feature
            # Only support beam search for now
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
                input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = eos_token_id
        SCREAMING_SNAKE_CASE : int = None
        SCREAMING_SNAKE_CASE : List[Any] = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )

        if input_embeds is not None:
            SCREAMING_SNAKE_CASE : Dict = input_embeds
        else:
            SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )

        for i in range(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
            SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()

            if scores is None:
                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
                SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    SCREAMING_SNAKE_CASE : List[Any] = next_tokens
                else:
                    SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
                    SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
                SCREAMING_SNAKE_CASE : Optional[int] = 0
                SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
                SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
                SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
                SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
                SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
                SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
                SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
                SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
                SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
                SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]

            SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
            SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
            if is_stopped.all():
                break

        SCREAMING_SNAKE_CASE : int = scores / seq_lengths
        SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
        # tokens tensors are already padded to max_seq_length
        SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
        SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
323
0
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


lowerCAmelCase__ : List[Any] =get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

lowerCAmelCase__ : List[str] =250004
lowerCAmelCase__ : Optional[int] =250020


@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( lowercase_ , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase__ : str = MBartTokenizer
    UpperCamelCase__ : List[str] = MBartTokenizerFast
    UpperCamelCase__ : List[Any] = True
    UpperCamelCase__ : List[Any] = True

    def _A ( self ):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        __SCREAMING_SNAKE_CASE = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )

        __SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
        self.assertListEqual(lowerCamelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,
        )

        __SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            lowerCamelCase_ ,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] ,
        )
        __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ ,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] ,
        )

        __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ ,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] ,
        )

    def _A ( self ):
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )

                __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()

                __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )

                # Checks everything loads correctly in the same way
                __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(lowerCamelCase_ )

                # Save tokenizer rust, legacy_format=True
                __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()

                __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )

                # Checks everything loads correctly in the same way
                __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )

                shutil.rmtree(lowerCamelCase_ )

                # Save tokenizer rust, legacy_format=False
                __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()

                __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase_ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )

                shutil.rmtree(lowerCamelCase_ )


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase__ : Tuple = 'facebook/mbart-large-en-ro'
    UpperCamelCase__ : Any = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    UpperCamelCase__ : str = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    UpperCamelCase__ : List[str] = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def _A ( cls ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        __SCREAMING_SNAKE_CASE = 1
        return cls

    def _A ( self ):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )

    def _A ( self ):
        '''simple docstring'''
        self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
        __SCREAMING_SNAKE_CASE = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        __SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
        __SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
        self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , lowerCamelCase_ )
        __SCREAMING_SNAKE_CASE = 10
        __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , lowerCamelCase_ )
        self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )

    def _A ( self ):
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_026, 250_001] )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
        __SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase_ )
        __SCREAMING_SNAKE_CASE = MBartTokenizer.from_pretrained(lowerCamelCase_ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )

    @require_torch
    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='pt' )
        __SCREAMING_SNAKE_CASE = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = self.tokenizer(
            self.src_text ,
            text_target=self.tgt_text ,
            padding=lowerCamelCase_ ,
            truncation=lowerCamelCase_ ,
            max_length=len(self.expected_src_tokens ) ,
            return_tensors='pt' ,
        )
        __SCREAMING_SNAKE_CASE = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )

        self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )

        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        __SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ,
max_length=3 , return_tensors='pt' ) __SCREAMING_SNAKE_CASE = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='pt' ) __SCREAMING_SNAKE_CASE = targets["""input_ids"""] __SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # A, test, EOS, en_XX 'input_ids': [[62, 3_034, 2, 250_004]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 250_001, } , )
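A minimal usage sketch of the en_XX-to-ro_RO encoding these tests exercise (checkpoint name taken from the test class above; network access assumed):

from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Seful ONU declara ca nu exista o solutie militara in Siria",
    return_tensors="pt",
)
# Source ids end with [eos, en_XX]; labels end with [eos, ro_RO].
print(batch.input_ids[0, -2:].tolist(), batch.labels[0, -2:].tolist())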
257
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git_vision_model''' def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : str = attention_dropout SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : List[str] = hidden_act @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ): '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git''' def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ ) if vision_config is None: SCREAMING_SNAKE_CASE : Any = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : Tuple = hidden_size SCREAMING_SNAKE_CASE : int = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : Dict = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings SCREAMING_SNAKE_CASE : int = num_image_with_embedding SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id SCREAMING_SNAKE_CASE : str = eos_token_id def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
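A brief sketch of composing the two config classes defined above; the values are illustrative, and the dict round-trip matches the GitVisionConfig(**vision_config) call in __init__:

from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=384, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
print(config.vision_config.image_size)  # 384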
323
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def lowerCamelCase ( lowerCAmelCase : int ): """simple docstring""" if "cls_token" in name: __magic_name__ : Dict = name.replace('cls_token' , 'vit.embeddings.cls_token' ) if "mask_token" in name: __magic_name__ : Tuple = name.replace('mask_token' , 'decoder.mask_token' ) if "decoder_pos_embed" in name: __magic_name__ : Tuple = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: __magic_name__ : Optional[int] = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' ) if "patch_embed.proj" in name: __magic_name__ : str = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __magic_name__ : List[str] = name.replace('patch_embed.norm' , 'vit.embeddings.norm' ) if "decoder_blocks" in name: __magic_name__ : List[str] = name.replace('decoder_blocks' , 'decoder.decoder_layers' ) if "blocks" in name: __magic_name__ : Optional[Any] = name.replace('blocks' , 'vit.encoder.layer' ) if "attn.proj" in name: __magic_name__ : Dict = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __magic_name__ : Union[str, Any] = name.replace('attn' , 'attention.self' ) if "norm1" in name: __magic_name__ : Any = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __magic_name__ : List[str] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __magic_name__ : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __magic_name__ : Any = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: __magic_name__ : Dict = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: __magic_name__ : int = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: __magic_name__ : Tuple = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name: __magic_name__ : Union[str, Any] = name.replace('norm.weight' , 'vit.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name: __magic_name__ : Optional[Any] = name.replace('norm.bias' , 'vit.layernorm.bias' ) return name def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ : Union[str, Any] = orig_state_dict.pop(lowerCamelCase_ ) if "qkv" in key: __magic_name__ : Union[str, Any] = key.split('.' 
) __magic_name__ : str = int(key_split[1] ) if "decoder_blocks" in key: __magic_name__ : int = config.decoder_hidden_size __magic_name__ : Union[str, Any] = """decoder.decoder_layers.""" if "weight" in key: __magic_name__ : Union[str, Any] = val[:dim, :] __magic_name__ : List[str] = val[dim : dim * 2, :] __magic_name__ : int = val[-dim:, :] elif "bias" in key: __magic_name__ : Tuple = val[:dim] __magic_name__ : List[Any] = val[dim : dim * 2] __magic_name__ : Dict = val[-dim:] else: __magic_name__ : Tuple = config.hidden_size __magic_name__ : List[Any] = """vit.encoder.layer.""" if "weight" in key: __magic_name__ : Optional[int] = val[:dim, :] __magic_name__ : Dict = val[dim : dim * 2, :] __magic_name__ : str = val[-dim:, :] elif "bias" in key: __magic_name__ : Optional[Any] = val[:dim] __magic_name__ : List[Any] = val[dim : dim * 2] __magic_name__ : Optional[int] = val[-dim:] else: __magic_name__ : List[Any] = val return orig_state_dict def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Any ): """simple docstring""" __magic_name__ : Union[str, Any] = ViTMAEConfig() if "large" in checkpoint_url: __magic_name__ : Tuple = 1024 __magic_name__ : Optional[Any] = 4096 __magic_name__ : Any = 24 __magic_name__ : Optional[int] = 16 elif "huge" in checkpoint_url: __magic_name__ : Dict = 14 __magic_name__ : Union[str, Any] = 1280 __magic_name__ : Any = 5120 __magic_name__ : Optional[Any] = 32 __magic_name__ : Union[str, Any] = 16 __magic_name__ : List[str] = ViTMAEForPreTraining(lowerCamelCase_ ) __magic_name__ : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )["""model"""] __magic_name__ : Tuple = ViTMAEImageProcessor(size=config.image_size ) __magic_name__ : List[Any] = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) model.eval() __magic_name__ : Optional[Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" __magic_name__ : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) __magic_name__ : Dict = ViTMAEImageProcessor(size=config.image_size ) __magic_name__ : Any = image_processor(images=lowerCamelCase_ , return_tensors='pt' ) # forward pass torch.manual_seed(2 ) __magic_name__ : List[str] = model(**lowerCamelCase_ ) __magic_name__ : Optional[int] = outputs.logits if "large" in checkpoint_url: __magic_name__ : Tuple = torch.tensor( [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] ) elif "huge" in checkpoint_url: __magic_name__ : Union[str, Any] = torch.tensor( [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] ) else: __magic_name__ : Union[str, Any] = torch.tensor( [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase_ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": lowerCAmelCase :int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, 
help='''Path to the output PyTorch model directory.''' ) lowerCAmelCase :List[Any] = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
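A toy illustration of the fused-qkv split performed in the state-dict conversion above (shapes are hypothetical, chosen small for clarity):

import torch

dim = 4  # stands in for config.hidden_size
qkv_weight = torch.randn(3 * dim, dim)  # fused [query; key; value] projection
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)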
331
'''simple docstring''' from manim import * class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 ) SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i, rect in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 ) target.move_to(lowerCamelCase_ ) model_arr.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCamelCase_ ) self.add(*lowerCamelCase_ , *lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) disk.move_to([-4, -1.25, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = 
MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 ) input.set_fill(lowerCamelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 ) self.play(Write(lowerCamelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 ) self.play(MoveToTarget(lowerCamelCase_ ) ) self.play(FadeOut(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02} self.play( Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) SCREAMING_SNAKE_CASE : Optional[int] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) SCREAMING_SNAKE_CASE : Any = AnimationGroup( FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCamelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: SCREAMING_SNAKE_CASE : Optional[Any] = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) SCREAMING_SNAKE_CASE : 
Union[str, Any] = a_c SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , ) SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) ) self.wait()
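A hedged usage note: scenes like the one above are typically rendered with the Manim community CLI; assuming this file were saved as big_model_inference.py, the command would be roughly:

# manim -pql big_model_inference.py UpperCamelCase__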
323
0
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    """Return True if ``number`` uses each of the digits 1-9 exactly once."""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    For 4-digit n >= 5000, concat(n, 2n) == n * 100002 (2n has 5 digits);
    for 3-digit n <= 333, concat(n, 2n, 3n) == n * 1002003.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
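A more general sketch of the concatenated-product idea the fixed multipliers encode (helper name is illustrative):

def concatenated_product(n: int, k: int) -> int:
    """Concatenate n*1, n*2, ..., n*k into a single integer."""
    return int("".join(str(n * i) for i in range(1, k + 1)))

# 9327 * (1, 2) -> 932718654, which is 9-pandigital.
assert concatenated_product(9327, 2) == 932718654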
228
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = graph # mapping node to its parent in resulting breadth first tree SCREAMING_SNAKE_CASE : dict[str, str | None] = {} SCREAMING_SNAKE_CASE : List[str] = source_vertex def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex} SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue while queue: SCREAMING_SNAKE_CASE : str = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = vertex queue.append(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ ) if target_vertex_parent is None: SCREAMING_SNAKE_CASE : Tuple = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCamelCase_ ) return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}''' if __name__ == "__main__": __UpperCAmelCase = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
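A quick sanity sketch mirroring the __main__ block above (note that the final print there raises ValueError, since "Foo" is not a vertex of the example graph):

g = Graph(graph, "G")
g.breath_first_search()
assert g.shortest_path("D") == "G->C->A->B->D"  # BFS tree path from source G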
323
0
"""simple docstring""" import sys from pathlib import Path A = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) A = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} A = '''zero2''' A = '''zero3''' A = [ZEROa, ZEROa] def __A ( a_ :Optional[Any] , a_ :Optional[int] , a_ :List[Any]) -> Optional[int]: __a : Tuple = parameterized.to_safe_name('''_'''.join(str(lowerCamelCase_) for x in param.args)) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test A = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __lowercase ( lowercase_ ): '''simple docstring''' @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) def _lowerCamelCase ( self , _UpperCAmelCase ): pass def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = True , ): __a : Optional[Any] = models[model] __a : Optional[int] = self.run_trainer( stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) self.do_checks(lowerCamelCase_ ) return output_dir def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , ): __a : Optional[int] = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowerCamelCase_ ) __a : str = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowerCamelCase_ )} 
--per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a : Optional[int] = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __a : Optional[Any] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __a : Optional[int] = self.get_launcher(lowerCamelCase_ ) __a : Optional[int] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase_ , env=self.get_env() ) return output_dir def _lowerCamelCase ( self , _UpperCAmelCase=False ): __a : List[str] = min(2 , get_gpu_count() ) if distributed else 1 return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
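The ds_config_wav2vec2_{stage}.json files are referenced but not shown; an illustrative ZeRO stage-2 config of the shape these tests expect might look like this (not the repo's exact file; find_unused_parameters is the flag called out in the comment above):

ds_config_zero2 = {
    "fp16": {"enabled": "auto"},
    "zero_optimization": {
        "stage": 2,
        "find_unused_parameters": True,
    },
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
}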
160
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = pos_x SCREAMING_SNAKE_CASE : Any = pos_y SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Tuple = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowerCamelCase_ ) self.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCamelCase_ ) else: self.open_nodes.append(lowerCamelCase_ ) return [self.start.pos] def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) ) return successors def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = node SCREAMING_SNAKE_CASE : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) self.fwd_astar.closed_nodes.append(lowerCamelCase_ ) self.bwd_astar.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node SCREAMING_SNAKE_CASE : Any = current_fwd_node SCREAMING_SNAKE_CASE : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop( astar.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCamelCase_ ) else: astar.open_nodes.append(lowerCamelCase_ ) return [self.fwd_astar.start.pos] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
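The open lists above are fully re-sorted on every iteration; a common alternative is a binary heap keyed on f_cost, which the __lt__ defined on the node class already supports. A hypothetical drop-in sketch:

import heapq

def pop_best(open_nodes: list) -> "Node":
    """Replaces `open_nodes.sort(); open_nodes.pop(0)` at O(log n) per pop,
    provided nodes are added with heapq.heappush instead of list.append."""
    return heapq.heappop(open_nodes)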
323
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _a = logging.getLogger(__name__) def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: """simple docstring""" return (preds == labels).mean() @dataclass class __lowerCamelCase : """simple docstring""" UpperCamelCase__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) UpperCamelCase__ = field( default=lowercase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"}) UpperCamelCase__ = field( default=lowercase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) UpperCamelCase__ = field( default=lowercase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCamelCase : """simple docstring""" UpperCamelCase__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())}) UpperCamelCase__ = field(metadata={"help": "Should contain the data files for the task."}) UpperCamelCase__ = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCamelCase__ = field( default=lowercase_ , metadata={"help": "Overwrite the cached training and evaluation sets"}) def __A ( )-> Tuple: """simple docstring""" _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: _UpperCAmelCase = processors[data_args.task_name]() _UpperCAmelCase = processor.get_labels() _UpperCAmelCase = len(lowerCamelCase_ ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets _UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__lowerCAmelCase ) -> Dict: _UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator _UpperCAmelCase = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(lowerCamelCase_ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , lowerCamelCase_ , lowerCamelCase_ ) writer.write('%s = %s\n' % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( __lowerCAmelCase )-> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
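An illustrative invocation of this script (file name and paths are placeholders, not taken from the source; --task_name and --data_dir come from the dataclasses above):

# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --max_seq_length 128 \
#     --output_dir ./out --do_train --do_eval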
39
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''efficientnet''' def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : int = width_coefficient SCREAMING_SNAKE_CASE : List[str] = depth_coefficient SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor SCREAMING_SNAKE_CASE : List[str] = kernel_sizes SCREAMING_SNAKE_CASE : Dict = in_channels SCREAMING_SNAKE_CASE : List[str] = out_channels SCREAMING_SNAKE_CASE : Any = depthwise_padding SCREAMING_SNAKE_CASE : Dict = strides SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats SCREAMING_SNAKE_CASE : Any = expand_ratios SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dim SCREAMING_SNAKE_CASE : List[str] = pooling_type SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Any = batch_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum SCREAMING_SNAKE_CASE : Dict = dropout_rate SCREAMING_SNAKE_CASE : int = drop_connect_rate SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4 class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return 1e-5
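A short sketch instantiating the config above (the constructor defaults correspond to the google/efficientnet-b7 checkpoint referenced at the top; the values here are illustrative):

from transformers import EfficientNetConfig

config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)
# num_hidden_layers is derived as sum(num_block_repeats) * 4 in __init__.
print(config.num_hidden_layers)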
323
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PegasusXForConditionalGeneration''', '''PegasusXModel''', '''PegasusXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
153
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
323
0
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A_ ( unittest.TestCase ): def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : Dict ) -> List[str]: __lowerCAmelCase: str = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) __lowerCAmelCase: Union[str, Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCAmelCase: List[str] = """A painting of a squirrel eating a burger""" __lowerCAmelCase: Optional[int] = torch.manual_seed(0 ) __lowerCAmelCase: List[str] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' ) __lowerCAmelCase: Optional[Any] = output.images __lowerCAmelCase: Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCAmelCase: List[Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self : List[Any] ) -> Any: __lowerCAmelCase: List[str] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCAmelCase: Tuple = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCAmelCase: Optional[Any] = """A painting of a squirrel eating a burger""" __lowerCAmelCase: List[str] = torch.manual_seed(0 ) __lowerCAmelCase: List[Any] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' ) __lowerCAmelCase: Tuple = output.images __lowerCAmelCase: Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCAmelCase: Optional[Any] = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def UpperCAmelCase ( self : int ) -> int: __lowerCAmelCase: int = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCAmelCase: Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) __lowerCAmelCase: Optional[int] = """A painting of a squirrel eating a burger""" __lowerCAmelCase: Optional[int] = torch.manual_seed(0 ) __lowerCAmelCase: Union[str, Any] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=lowerCamelCase_ , ) __lowerCAmelCase: Any = output.images __lowerCAmelCase: str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCAmelCase: Dict = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
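A compact sketch mirroring the first test above (GPU strongly recommended; prompt and settings come straight from the test):

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.set_scheduler("sample_euler")
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=9.0,
    num_inference_steps=20,
    output_type="np",
).images[0]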
322
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
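The full-loop tests above reduce to a three-step scheduler protocol; a minimal sketch with a placeholder model (a real caller would supply a trained consistency model):

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # must be descending, as the validation tests assert

model = lambda x, t: torch.zeros_like(x)  # placeholder standing in for self.dummy_model()
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)                                # 1. scale model input
    residual = model(scaled, t)                                                    # 2. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. predict previous sample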
323
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
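A short instantiation sketch for the config above; the values shown are its defaults, so this is equivalent to NatConfig():

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7)
print(config.num_layers)   # 4, derived from len(depths)
print(config.hidden_size)  # 512 == 64 * 2 ** 3, the channel dim after the last stage
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']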
5
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
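In practice this reader is reached through datasets.load_dataset; a minimal sketch (the file path is an assumption):

from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")  # one example per line
ds_stream = load_dataset("text", data_files={"train": "corpus.txt"}, split="train", streaming=True)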
323
0
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
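These exports support the usual scheduler-swap idiom; a sketch, with the checkpoint name as an assumption:

from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)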
196
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
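Once converted and pushed, the checkpoints load like any other Transformers model; a sketch using the hub names this script pushes to, with a local copy of the test image assumed:

import torch
from PIL import Image
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
processor = SegformerImageProcessor()

image = Image.open("ADE_val_00000001.jpg").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(pixel_values).logits  # (1, 150, H, W): one channel per ADE20K class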
323
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer a_ = logging.get_logger(__name__) # pylint: disable=invalid-name a_ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n' @dataclass class __SCREAMING_SNAKE_CASE ( lowercase_ ): snake_case_ = 42 class __SCREAMING_SNAKE_CASE ( lowercase_ ): def __init__( self : List[str] , __lowercase : PriorTransformer , __lowercase : CLIPVisionModel , __lowercase : CLIPImageProcessor , __lowercase : HeunDiscreteScheduler , __lowercase : ShapERenderer , ) -> Union[str, Any]: super().__init__() self.register_modules( prior=lowerCamelCase_ , image_encoder=lowerCamelCase_ , image_processor=lowerCamelCase_ , scheduler=lowerCamelCase_ , renderer=lowerCamelCase_ , ) def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Any , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Tuple ) -> int: if latents is None: SCREAMING_SNAKE_CASE__ : Optional[Any] =randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" ) SCREAMING_SNAKE_CASE__ : Tuple =latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =latents * scheduler.init_noise_sigma return latents def __magic_name__ ( self : Tuple , __lowercase : Any=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) SCREAMING_SNAKE_CASE__ : int =torch.device(F"cuda:{gpu_id}" ) SCREAMING_SNAKE_CASE__ : str =[self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) @property def __magic_name__ ( self : Optional[int] ) -> List[Any]: if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(lowerCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __magic_name__ ( self : Dict , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Any , 
__lowercase : Union[str, Any] , ) -> str: if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(image[0] , torch.Tensor ): SCREAMING_SNAKE_CASE__ : Dict =torch.cat(lowerCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCamelCase_ , axis=0 ) if not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =image.to(dtype=self.image_encoder.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : Any =self.image_encoder(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE__ : List[str] =image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 SCREAMING_SNAKE_CASE__ : List[str] =image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE__ : Dict =torch.zeros_like(lowerCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE__ : str =torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Any , __lowercase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowercase : int = 1 , __lowercase : int = 25 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : float = 4.0 , __lowercase : int = 64 , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , ) -> Optional[Any]: if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE__ : int =1 elif isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE__ : Dict =image.shape[0] elif isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): SCREAMING_SNAKE_CASE__ : int =len(lowerCamelCase_ ) else: raise ValueError( F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCamelCase_ )}" ) SCREAMING_SNAKE_CASE__ : List[str] =self._execution_device SCREAMING_SNAKE_CASE__ : List[str] =batch_size * num_images_per_prompt SCREAMING_SNAKE_CASE__ : Union[str, Any] =guidance_scale > 1.0 SCREAMING_SNAKE_CASE__ : List[Any] =self._encode_image(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # prior self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.scheduler.timesteps SCREAMING_SNAKE_CASE__ : List[str] =self.prior.config.num_embeddings SCREAMING_SNAKE_CASE__ : str =self.prior.config.embedding_dim SCREAMING_SNAKE_CASE__ : str =self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim SCREAMING_SNAKE_CASE__ : int =latents.reshape(latents.shape[0] , lowerCamelCase_ , lowerCamelCase_ ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE__ : Optional[int] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE__ : str =self.scheduler.scale_model_input(lowerCamelCase_ , 
lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.prior( lowerCamelCase_ , timestep=lowerCamelCase_ , proj_embedding=lowerCamelCase_ , ).predicted_image_embedding # remove the variance SCREAMING_SNAKE_CASE__ : Tuple =noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: SCREAMING_SNAKE_CASE__ : Tuple =noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) SCREAMING_SNAKE_CASE__ : Optional[int] =self.scheduler.step( lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : str =[] for i, latent in enumerate(lowerCamelCase_ ): print() SCREAMING_SNAKE_CASE__ : List[str] =self.renderer.decode( latent[None, :] , lowerCamelCase_ , size=lowerCamelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , ) images.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE__ : str =torch.stack(lowerCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}" ) SCREAMING_SNAKE_CASE__ : str =images.cpu().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE__ : Optional[int] =[self.numpy_to_pil(lowerCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=lowerCamelCase_ )
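The guidance arithmetic inside the denoising loop above is standard classifier-free guidance; isolated, it is just:

import torch

def classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred holds the unconditional and conditional halves concatenated along dim 0,
    # exactly as the pipeline builds them via torch.cat([negative_image_embeds, image_embeds]).
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)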
152
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
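A usage sketch for the layer above; the checkpoint name is an assumption, and note that without max_length/pad_token_id the layer returns ragged token ids rather than a padded matrix:

import tensorflow as tf

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world", "a longer second example"]))
print(batch["input_ids"], batch["attention_mask"])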
323
0
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = F'''{sampling_rate}''' __SCREAMING_SNAKE_CASE : Union[str, Any] = """1""" __SCREAMING_SNAKE_CASE : Optional[Any] = """f32le""" __SCREAMING_SNAKE_CASE : Union[str, Any] = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __SCREAMING_SNAKE_CASE : List[Any] = ffmpeg_process.communicate(lowerCamelCase_ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error __SCREAMING_SNAKE_CASE : str = output_stream[0] __SCREAMING_SNAKE_CASE : int = np.frombuffer(lowerCamelCase_ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): __SCREAMING_SNAKE_CASE : Optional[int] = F'''{sampling_rate}''' __SCREAMING_SNAKE_CASE : Optional[int] = """1""" if format_for_conversion == "s16le": __SCREAMING_SNAKE_CASE : Tuple = 2 elif format_for_conversion == "f32le": __SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) __SCREAMING_SNAKE_CASE : int = platform.system() if system == "Linux": __SCREAMING_SNAKE_CASE : List[str] = """alsa""" __SCREAMING_SNAKE_CASE : int = """default""" elif system == "Darwin": __SCREAMING_SNAKE_CASE : Union[str, Any] = """avfoundation""" __SCREAMING_SNAKE_CASE : str = """:0""" elif system == "Windows": __SCREAMING_SNAKE_CASE : Optional[int] = """dshow""" __SCREAMING_SNAKE_CASE : int = """default""" __SCREAMING_SNAKE_CASE : List[Any] = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] __SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __SCREAMING_SNAKE_CASE : Any = _ffmpeg_stream(lowerCamelCase_ , lowerCamelCase_ ) for item in iterator: yield item def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = stream_chunk_s else: __SCREAMING_SNAKE_CASE : List[Any] = chunk_length_s __SCREAMING_SNAKE_CASE : int = ffmpeg_microphone(lowerCamelCase_ , lowerCamelCase_ , format_for_conversion=lowerCamelCase_ ) if format_for_conversion == "s16le": __SCREAMING_SNAKE_CASE : Optional[Any] = np.intaa __SCREAMING_SNAKE_CASE : Dict = 2 elif format_for_conversion == "f32le": __SCREAMING_SNAKE_CASE : Optional[Any] = np.floataa __SCREAMING_SNAKE_CASE : str = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __SCREAMING_SNAKE_CASE : str = chunk_length_s / 6 __SCREAMING_SNAKE_CASE : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase_ , (int, float) ): __SCREAMING_SNAKE_CASE : Tuple = [stride_length_s, stride_length_s] __SCREAMING_SNAKE_CASE : Optional[int] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __SCREAMING_SNAKE_CASE : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __SCREAMING_SNAKE_CASE : Any = datetime.datetime.now() __SCREAMING_SNAKE_CASE : Optional[int] = datetime.timedelta(seconds=lowerCamelCase_ ) for item in chunk_bytes_iter(lowerCamelCase_ , lowerCamelCase_ , stride=(stride_left, stride_right) , stream=lowerCamelCase_ ): # Put everything back in numpy scale __SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(item['''raw'''] , dtype=lowerCamelCase_ ) __SCREAMING_SNAKE_CASE : List[str] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) __SCREAMING_SNAKE_CASE : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): __SCREAMING_SNAKE_CASE : Optional[Any] = B"""""" __SCREAMING_SNAKE_CASE : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase_ ) < chunk_len: __SCREAMING_SNAKE_CASE : Dict = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase_ ) >= chunk_len: # We are flushing the accumulator __SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) __SCREAMING_SNAKE_CASE : List[Any] = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: __SCREAMING_SNAKE_CASE : Optional[Any] = False yield item __SCREAMING_SNAKE_CASE : Optional[int] = stride_left __SCREAMING_SNAKE_CASE : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase_ ) > stride_left: __SCREAMING_SNAKE_CASE : Union[str, Any] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: __SCREAMING_SNAKE_CASE : Tuple = False yield item def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : List[Any] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase_ , stdout=subprocess.PIPE , bufsize=lowerCamelCase_ ) as ffmpeg_process: while True: __SCREAMING_SNAKE_CASE : Optional[int] = ffmpeg_process.stdout.read(lowerCamelCase_ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
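A consumption sketch for the streaming helper above (upstream this is `ffmpeg_microphone_live`); a working microphone and an ffmpeg install are assumed:

for chunk in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
    audio = chunk["raw"]                      # float32 numpy audio at chunk["sampling_rate"] Hz
    stride_left, stride_right = chunk["stride"]
    if not chunk.get("partial", False):       # stop once a full chunk has accumulated
        break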
9
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
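Concrete readers implement this contract; a skeletal sketch (the Text reader earlier in this dump is a complete example):

class MyFormatReader(AbstractDatasetReader):  # hypothetical subclass, for illustration only
    def read(self):
        # Build (or stream) a Dataset from self.path_or_paths, honoring
        # self.split, self.features, self.streaming and self.keep_in_memory.
        ...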
323
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Dict ='''▁''' lowerCAmelCase__ : Any ={'''vocab_file''': '''spiece.model'''} lowerCAmelCase__ : List[Any] ={ '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase__ : List[Any] ={ '''google/pegasus-xsum''': 512, } lowerCAmelCase__ : int =logging.get_logger(__name__) class UpperCAmelCase_ ( lowercase_ ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self , _A , _A="<pad>" , _A="</s>" , _A="<unk>" , _A="<mask_2>" , _A="<mask_1>" , _A=None , _A=103 , _A = None , **_A , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError( f"""additional_special_tokens should be of type {type(lowerCamelCase_ )}, but is""" f""" {type(lowerCamelCase_ )}""" ) __SCREAMING_SNAKE_CASE = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(lowerCamelCase_ ) , self.offset - 1 ) ] if len(set(lowerCamelCase_ ) ) != len(lowerCamelCase_ ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) __SCREAMING_SNAKE_CASE = additional_special_tokens_extended else: __SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token_sent=lowerCamelCase_ , offset=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) __SCREAMING_SNAKE_CASE = mask_token_sent __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase_ ) # add special tokens to encoder dict __SCREAMING_SNAKE_CASE = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} @property def _A ( self ): '''simple docstring''' return len(self.sp_model ) + self.offset def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A ( self , _A ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def _A ( self , _A ): '''simple docstring''' if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __SCREAMING_SNAKE_CASE = self.sp_model.piece_to_id(lowerCamelCase_ ) return sp_id + self.offset def _A ( self , _A ): '''simple docstring''' if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(index - self.offset ) return token def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCamelCase_ ) + token __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(lowerCamelCase_ ) out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def _A ( self , _A=False ): '''simple docstring''' return 1 def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for 
x in seq] def _A ( self , _A , _A = None , _A = False ): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(lowerCamelCase_ ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A ( self , _A , _A=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self , _A , _A = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , 'wb' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
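A round-trip sketch for the tokenizer above, using the checkpoint named in its pretrained map (upstream the class is `PegasusTokenizer`):

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS uses gap-sentence pretraining.").input_ids  # ends with eos_token_id == 1
print(tokenizer.decode(ids, skip_special_tokens=True))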
257
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
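The behaviours exercised above reduce to this usage sketch; the checkpoint and voice preset names are taken from the test fixtures:

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
history_prompt = inputs["history_prompt"]  # dict of semantic / coarse / fine prompt arrays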
323
0
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to ``num`` (inclusive)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
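For example:

print(prime_sieve(10))  # [2, 3, 5, 7]
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]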
331
'''simple docstring'''

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


__UpperCAmelCase = logging.getLogger(__name__)


def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return (preds == labels).mean()


@dataclass
class UpperCamelCase__ :
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    SCREAMING_SNAKE_CASE__ = field(
        default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    SCREAMING_SNAKE_CASE__ = field(
        default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    SCREAMING_SNAKE_CASE__ = field(
        default=lowercase_ ,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,
    )


@dataclass
class UpperCamelCase__ :
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    SCREAMING_SNAKE_CASE__ = field(
        default=128 ,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } ,
    )
    SCREAMING_SNAKE_CASE__ = field(
        default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


def __A ( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""" )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa ,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )

    # Set seed
    set_seed(training_args.seed )

    try:
        SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
        SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
        SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=lowerCamelCase_ ,
        finetuning_task=data_args.task_name ,
        cache_dir=model_args.cache_dir ,
    )
    SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,
        config=lowerCamelCase_ ,
        cache_dir=model_args.cache_dir ,
    )

    # Get datasets
    SCREAMING_SNAKE_CASE : Optional[Any] = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=lowerCamelCase_ ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.train ,
        )
        if training_args.do_train
        else None
    )
    SCREAMING_SNAKE_CASE : Dict = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=lowerCamelCase_ ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.dev ,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(lowerCamelCase_ ) -> Dict:
        SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}

    # Data collator
    SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    SCREAMING_SNAKE_CASE : Any = Trainer(
        model=lowerCamelCase_ ,
        args=lowerCamelCase_ ,
        train_dataset=lowerCamelCase_ ,
        eval_dataset=lowerCamelCase_ ,
        compute_metrics=lowerCamelCase_ ,
        data_collator=lowerCamelCase_ ,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    SCREAMING_SNAKE_CASE : Optional[Any] = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )

        SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()

        SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(lowerCamelCase_ , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
                    writer.write("""%s = %s\n""" % (key, value) )

            results.update(lowerCamelCase_ )

    return results


def __A ( lowerCamelCase_ ):
    """simple docstring"""
    main()


if __name__ == "__main__":
    main()
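# --- Illustrative sketch (added, not from the original script): the metric
# wired into `compute_metrics` above is plain exact-match accuracy,
# (preds == labels).mean(). A standalone demonstration with made-up logits;
# `_demo_accuracy` is a hypothetical helper introduced only for this example.
def _demo_accuracy() -> None:
    import numpy as np

    logits = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
    preds = np.argmax(logits, axis=1)  # -> [0, 1, 0]
    labels = np.array([0, 1, 1])
    assert (preds == labels).mean() == 2 / 3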
323
0
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


__UpperCamelCase : Dict = logging.get_logger(__name__)


class __lowerCAmelCase ( lowercase_ ):
    def __init__( self :Any , *__magic_name__ :str , **__magic_name__ :Dict ):
        '''simple docstring'''
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" ,
            lowerCamelCase_ ,
        )
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
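# --- Illustrative sketch (added, not from the original file): the generic
# deprecation-shim pattern this module follows -- the old class subclasses its
# replacement and emits a FutureWarning on construction. `OldWidget` and
# `NewWidget` are hypothetical names used only for this example.
import warnings


class NewWidget:
    def __init__(self, size: int = 1) -> None:
        self.size = size


class OldWidget(NewWidget):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OldWidget is deprecated. Please use NewWidget instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)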
228
'''simple docstring'''

from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


@dataclass
class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = 42


class UpperCamelCase__ ( nn.Module ):
    """simple docstring"""

    def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
        '''simple docstring'''
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block

        SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
            lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )

        SCREAMING_SNAKE_CASE : List[str] = None
        SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )

        # down
        SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
        for i, down_block_type in enumerate(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : Any = output_channel
            SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
            SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1

            SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
                lowerCamelCase_ ,
                num_layers=self.layers_per_block ,
                in_channels=lowerCamelCase_ ,
                out_channels=lowerCamelCase_ ,
                add_downsample=not is_final_block ,
                resnet_eps=1e-6 ,
                downsample_padding=0 ,
                resnet_act_fn=lowerCamelCase_ ,
                resnet_groups=lowerCamelCase_ ,
                attention_head_dim=lowerCamelCase_ ,
                temb_channels=lowerCamelCase_ ,
            )
            self.down_blocks.append(lowerCamelCase_ )

        # mid
        SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,
            resnet_eps=1e-6 ,
            resnet_act_fn=lowerCamelCase_ ,
            output_scale_factor=1 ,
            resnet_time_scale_shift="""default""" ,
            attention_head_dim=block_out_channels[-1] ,
            resnet_groups=lowerCamelCase_ ,
            temb_channels=lowerCamelCase_ ,
        )

        # out
        SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
        SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()

        SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
        SCREAMING_SNAKE_CASE : Tuple = False

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = x
        SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(lowerCamelCase_ : List[Any] ):
                def custom_forward(*lowerCamelCase_ : List[str] ):
                    return module(*lowerCamelCase_ )

                return custom_forward

            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                # middle
                SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
                # middle
                SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )

        else:
            # down
            for down_block in self.down_blocks:
                SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
            # middle
            SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )

        # post-process
        SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )

        return sample


class UpperCamelCase__ ( nn.Module ):
    """simple docstring"""

    def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
        '''simple docstring'''
        super().__init__()
        SCREAMING_SNAKE_CASE : int = layers_per_block

        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
            lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )

        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )

        SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None

        # mid
        SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,
            resnet_eps=1e-6 ,
            resnet_act_fn=lowerCamelCase_ ,
            output_scale_factor=1 ,
            resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type ,
            attention_head_dim=block_out_channels[-1] ,
            resnet_groups=lowerCamelCase_ ,
            temb_channels=lowerCamelCase_ ,
        )

        # up
        SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
        SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : str = output_channel
            SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]

            SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1

            SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
                lowerCamelCase_ ,
                num_layers=self.layers_per_block + 1 ,
                in_channels=lowerCamelCase_ ,
                out_channels=lowerCamelCase_ ,
                prev_output_channel=lowerCamelCase_ ,
                add_upsample=not is_final_block ,
                resnet_eps=1e-6 ,
                resnet_act_fn=lowerCamelCase_ ,
                resnet_groups=lowerCamelCase_ ,
                attention_head_dim=lowerCamelCase_ ,
                temb_channels=lowerCamelCase_ ,
                resnet_time_scale_shift=lowerCamelCase_ ,
            )
            self.up_blocks.append(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = output_channel

        # out
        if norm_type == "spatial":
            SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
        SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
        SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )

        SCREAMING_SNAKE_CASE : Dict = False

    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = z
        SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )

        SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(lowerCamelCase_ : List[str] ):
                def custom_forward(*lowerCamelCase_ : str ):
                    return module(*lowerCamelCase_ )

                return custom_forward

            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )

                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                # middle
                SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )

                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
        else:
            # middle
            SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )

            # up
            for up_block in self.up_blocks:
                SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )

        # post-process
        if latent_embeds is None:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )

        return sample


class UpperCamelCase__ ( nn.Module ):
    """simple docstring"""

    def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
        '''simple docstring'''
        super().__init__()
        SCREAMING_SNAKE_CASE : Tuple = n_e
        SCREAMING_SNAKE_CASE : int = vq_embed_dim
        SCREAMING_SNAKE_CASE : Tuple = beta
        SCREAMING_SNAKE_CASE : Union[str, Any] = legacy

        SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )

        SCREAMING_SNAKE_CASE : Optional[Any] = remap
        if self.remap is not None:
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
            SCREAMING_SNAKE_CASE : Any = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
                SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = n_e

        SCREAMING_SNAKE_CASE : Any = sane_index_shape

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = inds.shape
        assert len(lowerCamelCase_ ) > 1
        SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
        SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
        SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
        if self.unknown_index == "random":
            SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            SCREAMING_SNAKE_CASE : Any = self.unknown_index
        return new.reshape(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
        assert len(lowerCamelCase_ ) > 1
        SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
        if self.re_embed > self.used.shape[0]:  # extra token
            SCREAMING_SNAKE_CASE : List[Any] = 0  # simply set to zero
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
        return back.reshape(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
        SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )

        SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : List[str] = None

        # compute loss for embedding
        if not self.legacy:
            SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )

        # preserve gradients
        SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()

        # reshape back to match original input shape
        SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()

        if self.remap is not None:
            SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 )  # flatten

        if self.sane_index_shape:
            SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
        '''simple docstring'''
        if self.remap is not None:
            SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 )  # add batch axis
            SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 )  # flatten again

        # get quantized latent vectors
        SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )

        if shape is not None:
            SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
            # reshape back to match original input shape
            SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()

        return z_q


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = parameters
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
        SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
        SCREAMING_SNAKE_CASE : Dict = deterministic
        SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
        SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
        if self.deterministic:
            SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = randn_tensor(
            self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
        return x

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar ,
                    dim=[1, 2, 3] ,
                )

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        '''simple docstring'''
        return self.mean
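# --- Illustrative sketch (added, not from the original module): a minimal,
# standalone demonstration of the straight-through vector-quantization step
# used by the quantizer above. Assumes only torch; sizes and `_demo_vq` are
# arbitrary choices made for this example.
import torch


def _demo_vq() -> None:
    codebook = torch.randn(16, 4)  # 16 code vectors of dimension 4
    z = torch.randn(8, 4, requires_grad=True)
    # nearest codebook entry per row, as in the quantizer's forward pass
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[indices]
    # straight-through estimator: forward uses z_q, gradients flow through z
    z_q = z + (z_q - z).detach()
    z_q.sum().backward()
    assert z.grad is not None  # gradients reach z, not the codebook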
323
0
"""simple docstring""" from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
160
'''simple docstring'''

import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL

    @property
    def lowerCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = 4
        SCREAMING_SNAKE_CASE : str = 3
        SCREAMING_SNAKE_CASE : List[Any] = (32, 32)

        SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
        SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
        return init_dict, inputs_dict
323
0