Dataset schema (each record below carries these five fields, in this order):

    code                     string  (length 86 to 54.5k)
    code_codestyle           int64   (0 to 371)
    style_context            string  (length 87 to 49.2k)
    style_context_codestyle  int64   (0 to 349)
    label                    int64   (0 or 1)
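For orientation, here is a minimal sketch of loading and iterating rows with this schema via the `datasets` library. The repo id `user/code-style-pairs` is a hypothetical placeholder, since this dump does not name the dataset, and the label semantics are not specified here.

from datasets import load_dataset

# Hypothetical repo id -- the dump does not name the dataset.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # Each record: a code string, a style-context string, their style ids, and a 0/1 label.
    print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])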
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))
    for x in range(15, 20):
        some_list.append((x, 17))
    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))
    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)

    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
code_codestyle: 361
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
style_context_codestyle: 332
label: 0
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
code_codestyle: 362
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' 
) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
style_context_codestyle: 332
label: 0
def actual_power(a: int, b: int):
    """Compute a ** b by divide and conquer: the recursion halves the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
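A quick sanity check on the reconstruction above; the expected values follow directly from the definitions. Note that each level halves the exponent, but because the half power is computed twice rather than reused, the total work is linear in b rather than logarithmic.

assert actual_power(2, 10) == 1024  # even exponent: square of the half power
assert actual_power(3, 5) == 243    # odd exponent: extra factor of a
assert power(-2, -3) == -0.125      # negative exponent: reciprocal of (-2) ** 3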
code_codestyle: 363
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) 
attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', 
F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
style_context_codestyle: 332
label: 0
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # finds the last index of char in the pattern, or -1
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # finds the index of the mismatched character in the text, or -1
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
code_codestyle: 364
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write first n lines of each file f in src_dir to dest_dir/f"""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
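Because the script dispatches through fire.Fire, the function doubles as a command-line tool. A Python-level call looks like the sketch below; the directory names are illustrative placeholders.

# Keep only the first 100 lines of every file under raw/, writing results to mini/
# (equivalent to: python minify.py raw mini 100).
minify("raw", "mini", 100)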
style_context_codestyle: 332
label: 0
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A , A , A , A ) -> Tuple[int, int]: def constraint_to_multiple_of(A , A , A=0 , A=None ): snake_case = round(val / multiple ) * multiple if max_val is not None and x > max_val: snake_case = math.floor(val / multiple ) * multiple if x < min_val: snake_case = math.ceil(val / multiple ) * multiple return x snake_case = (output_size, output_size) if isinstance(A , A ) else output_size snake_case , snake_case = get_image_size(A ) snake_case , snake_case = output_size # determine new height and width snake_case = output_height / input_height snake_case = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width snake_case = scale_width else: # fit height snake_case = scale_height snake_case = constraint_to_multiple_of(scale_height * input_height , multiple=A ) snake_case = constraint_to_multiple_of(scale_width * input_width , multiple=A ) return (new_height, new_width) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = ['''pixel_values'''] def __init__( self, lowercase_ = True, lowercase_ = None, lowercase_ = PILImageResampling.BILINEAR, lowercase_ = False, lowercase_ = 1, lowercase_ = True, lowercase_ = 1 / 255, lowercase_ = True, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> None: super().__init__(**lowercase_ ) snake_case = size if size is not None else {'height': 384, 'width': 384} snake_case = get_size_dict(lowercase_ ) snake_case = do_resize snake_case = size snake_case = keep_aspect_ratio snake_case = ensure_multiple_of snake_case = resample snake_case = do_rescale snake_case = rescale_factor snake_case = do_normalize snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = False, lowercase_ = 1, lowercase_ = PILImageResampling.BICUBIC, lowercase_ = None, **lowercase_, ) -> np.ndarray: snake_case = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) snake_case = get_resize_output_image_size( lowercase_, output_size=(size['height'], size['width']), keep_aspect_ratio=lowercase_, multiple=lowercase_, ) return resize(lowercase_, size=lowercase_, resample=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> List[str]: return rescale(lowercase_, scale=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray: return normalize(lowercase_, mean=lowercase_, std=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = ChannelDimension.FIRST, **lowercase_, ) -> PIL.Image.Image: snake_case = do_resize if do_resize is not None else self.do_resize snake_case = size if size is not None else self.size snake_case = get_size_dict(lowercase_ ) snake_case = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio snake_case = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of snake_case = resample if resample is not None else self.resample snake_case = do_rescale if do_rescale is not None else self.do_rescale snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case = do_normalize if do_normalize is not None else self.do_normalize snake_case = image_mean if image_mean is not None else self.image_mean snake_case = image_std if image_std is not None else self.image_std snake_case = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
snake_case = [to_numpy_array(lowercase_ ) for image in images] if do_resize: snake_case = [self.resize(image=lowercase_, size=lowercase_, resample=lowercase_ ) for image in images] if do_rescale: snake_case = [self.rescale(image=lowercase_, scale=lowercase_ ) for image in images] if do_normalize: snake_case = [self.normalize(image=lowercase_, mean=lowercase_, std=lowercase_ ) for image in images] snake_case = [to_channel_dimension_format(lowercase_, lowercase_ ) for image in images] snake_case = {'pixel_values': images} return BatchFeature(data=lowercase_, tensor_type=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple: snake_case = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowercase_ ): snake_case = target_sizes.numpy() snake_case = [] for idx in range(len(lowercase_ ) ): snake_case = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='bilinear', align_corners=lowercase_ ) snake_case = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: snake_case = logits.argmax(dim=1 ) snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
code_codestyle: 365
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
style_context_codestyle: 332
label: 0
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid that contains as close to two million rectangles as possible."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 366
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
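A sketch of what the _LazyModule indirection buys, under the assumption that this file is transformers/models/git/__init__.py: importing the package is cheap, and the heavy submodules are only imported when one of their attributes is first accessed.

from transformers.models import git  # fast: only the import structure is registered

config = git.GitConfig()  # first attribute access triggers the real submodule import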
style_context_codestyle: 332
label: 0
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root lies between a and b only if
    # equation(a) and equation(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
code_codestyle: 367
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - 
expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
style_context_codestyle: 332
label: 0
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
code_codestyle: 368
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
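Assuming the reconstructed name MidiProcessor is right (the obfuscated dump hides it), the dummy's behavior looks like this: the import always succeeds, but any use fails loudly until note_seq is installed.

try:
    MidiProcessor()  # raises via requires_backends when note_seq is missing
except ImportError as err:
    print(err)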
style_context_codestyle: 332
label: 0
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A ) -> Optional[Any]: snake_case = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) snake_case = MaskFormerConfig(backbone_config=A ) snake_case = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok snake_case = 8_4_7 snake_case = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok snake_case = 1_5_0 snake_case = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok snake_case = 1_7_1 snake_case = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO snake_case = 1_3_3 snake_case = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok snake_case = 1_9 snake_case = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok snake_case = 6_5 snake_case = 'mapillary-vistas-id2label.json' snake_case = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} return config def __magic_name__ ( A ) -> Dict: snake_case = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) 
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) 
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return 
rename_keys def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = dct.pop(A ) snake_case = val def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): snake_case = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) snake_case = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[:dim, :] snake_case = in_proj_bias[: dim] snake_case = in_proj_weight[ dim : dim * 2, : ] snake_case = in_proj_bias[ dim : dim * 2 ] snake_case = in_proj_weight[ -dim :, : ] snake_case = in_proj_bias[-dim :] # fmt: on def __magic_name__ ( A , A ) -> int: # fmt: off snake_case = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[: hidden_size, :] snake_case = in_proj_bias[:config.hidden_size] snake_case = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case = in_proj_bias[hidden_size : hidden_size * 2] snake_case = in_proj_weight[-hidden_size :, :] snake_case = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[: hidden_size, :] snake_case = in_proj_bias[:config.hidden_size] snake_case = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case = in_proj_bias[hidden_size : hidden_size * 2] snake_case = in_proj_weight[-hidden_size :, :] snake_case = in_proj_bias[-hidden_size :] # fmt: on def __magic_name__ ( ) -> torch.Tensor: snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def __magic_name__ ( A , A , A , A = False ) -> Tuple: snake_case = get_maskformer_config(A ) # load original state_dict with open(A , 'rb' ) as f: snake_case = pickle.load(A ) snake_case = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys snake_case = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_swin_q_k_v(A , config.backbone_config ) read_in_decoder_q_k_v(A , A ) # update to torch tensors for key, value in state_dict.items(): snake_case = torch.from_numpy(A ) # load 🤗 model snake_case = MaskFormerForInstanceSegmentation(A ) model.eval() for name, param in model.named_parameters(): print(A , param.shape ) snake_case , snake_case = 
model.load_state_dict(A , strict=A ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(A ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results snake_case = prepare_img() if "vistas" in model_name: snake_case = 6_5 elif "cityscapes" in model_name: snake_case = 6_5_5_3_5 else: snake_case = 2_5_5 snake_case = True if 'ade' in model_name else False snake_case = MaskFormerImageProcessor(ignore_index=A , reduce_labels=A ) snake_case = image_processor(A , return_tensors='pt' ) snake_case = model(**A ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": snake_case = torch.tensor( [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , A , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase_ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            lowercase_,
        )
        super().__init__(*lowercase_, **lowercase_ )
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self, lowercase_ ) -> Any: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'], model_result['ss'] ): snake_case = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sgugger/tiny-distilbert-classification' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, only_pretrain_model=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> List[str]: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> Dict: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'patrickvonplaten/t5-tiny-random' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0, 'Cannot do xla on CPU.' ) def _lowerCamelCase ( self ) -> str: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], use_xla=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Dict: snake_case = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=lowercase_, save_to_csv=lowercase_, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(lowercase_, 'inf_time.csv' ), inference_memory_csv_file=os.path.join(lowercase_, 'inf_mem.csv' ), env_info_csv_file=os.path.join(lowercase_, 'env.csv' ), multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_, 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'env.csv' ) ).exists() ) def _lowerCamelCase ( self ) -> Tuple: snake_case = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ ): self.assertTrue(hasattr(lowercase_, 'sequential' ) ) self.assertTrue(hasattr(lowercase_, 'cumulative' ) ) self.assertTrue(hasattr(lowercase_, 'current' ) ) self.assertTrue(hasattr(lowercase_, 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(lowercase_, 'log.txt' ), log_print=lowercase_, trace_memory_line_by_line=lowercase_, eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(lowercase_, 'log.txt' ) ).exists() )
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values

lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)

lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)


def __magic_name__ ( ) -> Any:
    plt.scatter(A , A , color='red' )
    plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
from __future__ import annotations


def __magic_name__ ( A , A = None , A = None , A = False , ) -> tuple[int, float, str]:
    snake_case = cipher_alphabet or [chr(A ) for i in range(9_7 , 1_2_3 )]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        snake_case = {
            'a': 0.08_497, 'b': 0.01_492, 'c': 0.02_202, 'd': 0.04_253, 'e': 0.11_162,
            'f': 0.02_228, 'g': 0.02_015, 'h': 0.06_094, 'i': 0.07_546, 'j': 0.00_153,
            'k': 0.01_292, 'l': 0.04_025, 'm': 0.02_406, 'n': 0.06_749, 'o': 0.07_507,
            'p': 0.01_929, 'q': 0.00_095, 'r': 0.07_587, 's': 0.06_327, 't': 0.09_356,
            'u': 0.02_758, 'v': 0.00_978, 'w': 0.02_560, 'x': 0.00_150, 'y': 0.01_994,
            'z': 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        snake_case = frequencies_dict

    if not case_sensitive:
        snake_case = ciphertext.lower()

    # Chi squared statistic values
    snake_case = {}

    # cycle through all of the shifts
    for shift in range(len(A ) ):
        snake_case = ''

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                snake_case = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    A )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        snake_case = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                snake_case = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    snake_case = decrypted_with_shift.lower().count(A )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    snake_case = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    snake_case = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    snake_case = decrypted_with_shift.count(A )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    snake_case = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    snake_case = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        snake_case = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(A ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    snake_case = min(
        A , key=A , )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        ( snake_case ) ,
        ( snake_case ) ,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
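A brief usage sketch for the shift solver above. This corpus anonymizes identifiers, so the readable alias and the sample ciphertext below are hypothetical; in the de-obfuscated original the function is a chi-squared Caesar decoder returning (shift, chi-squared value, decoded text).

# Hypothetical alias for the solver defined above; its anonymized signature is
# (ciphertext, cipher_alphabet=None, frequencies_dict=None, case_sensitive=False).
decrypt_caesar_with_chi_squared = __magic_name__

# 'hello world' Caesar-shifted by 3 gives 'khoor zruog' (illustrative input);
# the de-obfuscated original would report shift 3 and the decoded plaintext.
print(decrypt_caesar_with_chi_squared('khoor zruog'))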
'''simple docstring'''
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = ''''''
    snake_case_ = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    snake_case_ = None  # compression type in fsspec. ex: "gzip"
    snake_case_ = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
        super().__init__(self, **lowercase_ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case = fsspec.open(
            lowercase_,
            mode='rb',
            protocol=lowercase_,
            compression=self.compression,
            client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {} ),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        snake_case = os.path.basename(self.file.path.split('::' )[0] )
        snake_case = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        snake_case = None

    @classmethod
    def _lowerCamelCase ( cls, lowercase_ ) -> Any:
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(lowercase_ ).lstrip('/' )

    def _lowerCamelCase ( self ) -> Optional[Any]:
        if self.dir_cache is None:
            snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            snake_case = {f['name']: f}

    def _lowerCamelCase ( self, lowercase_ ) -> str:
        return self.file.open().read()

    def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
        snake_case = self._strip_protocol(lowercase_ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''bz2'''
    snake_case_ = '''bz2'''
    snake_case_ = '''.bz2'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''gzip'''
    snake_case_ = '''gzip'''
    snake_case_ = '''.gz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''lz4'''
    snake_case_ = '''lz4'''
    snake_case_ = '''.lz4'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''xz'''
    snake_case_ = '''xz'''
    snake_case_ = '''.xz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''zstd'''
    snake_case_ = '''zstd'''
    snake_case_ = '''.zst'''

    def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
        super().__init__(
            fo=lowercase_,
            mode=lowercase_,
            target_protocol=lowercase_,
            target_options=lowercase_,
            block_size=lowercase_,
            **lowercase_,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case = self.file.__enter__

        class lowerCamelCase:
            def __init__( self, lowercase_ ) -> List[Any]:
                snake_case = file_

            def __enter__( self ) -> Dict:
                self._file.__enter__()
                return self

            def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
                self._file.__exit__(*lowercase_, **lowercase_ )

            def __iter__( self ) -> List[str]:
                return iter(self._file )

            def _lowerCamelCase ( self ) -> List[str]:
                return next(self._file )

            def __getattr__( self, lowercase_ ) -> List[Any]:
                return getattr(self._file, lowercase_ )

        def fixed_enter(*lowercase_, **lowercase_ ):
            return WrappedFile(_enter(*lowercase_, **lowercase_ ) )

        snake_case = fixed_enter
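For orientation, a minimal sketch of the same idea using core fsspec directly: transparent decompression while reading. The path below is a placeholder; the chained-URL form (e.g. gzip://file.txt::path/to/file.txt.gz) additionally requires filesystems like the classes above to be registered under their protocol names, as the datasets library does on import.

import fsspec

# Decompress on the fly while reading text; "./example.txt.gz" is a placeholder path.
with fsspec.open("./example.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())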
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = '''▁''' UpperCamelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } UpperCamelCase = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } UpperCamelCase = { '''facebook/s2t-small-librispeech-asr''': 1024, } UpperCamelCase = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] UpperCamelCase = {'''mustc''': MUSTC_LANGS} class snake_case_ ( __A ): __A : Tuple = VOCAB_FILES_NAMES __A : Tuple = PRETRAINED_VOCAB_FILES_MAP __A : Optional[Any] = MAX_MODEL_INPUT_SIZES __A : Tuple = ["input_ids", "attention_mask"] __A : List[int] = [] def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int]="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Dict="<pad>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : Tuple=False , lowercase_ : List[str]=False , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[int] , ) -> None: lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , do_upper_case=lowercase_ , do_lower_case=lowercase_ , tgt_lang=lowercase_ , lang_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) lowercase__ : Dict = do_upper_case lowercase__ : Optional[Any] = do_lower_case lowercase__ : Optional[int] = load_json(lowercase_ ) lowercase__ : Tuple = {v: k for k, v in self.encoder.items()} lowercase__ : Optional[int] = spm_file lowercase__ : Union[str, Any] = load_spm(lowercase_ , self.sp_model_kwargs ) if lang_codes is not None: lowercase__ : List[str] = lang_codes lowercase__ : Union[str, Any] = LANGUAGES[lang_codes] lowercase__ : List[str] = [F'''<lang:{lang}>''' for lang in self.langs] lowercase__ : str = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs} lowercase__ : str = self.lang_tokens lowercase__ : Optional[Any] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: lowercase__ : Dict = {} @property def __UpperCamelCase ( self : List[Any] ) -> int: return len(self.encoder ) @property def __UpperCamelCase ( self : List[str] ) -> str: return self._tgt_lang @tgt_lang.setter def __UpperCamelCase ( self : str , lowercase_ : List[Any] ) -> None: lowercase__ : Dict = new_tgt_lang self.set_tgt_lang_special_tokens(lowercase_ ) def __UpperCamelCase ( self : List[str] , lowercase_ : str ) -> None: lowercase__ : Any = self.lang_code_to_id[tgt_lang] lowercase__ : Tuple = [lang_code_id] def __UpperCamelCase ( self : List[str] , lowercase_ : str ) -> List[str]: return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[int] ) -> Any: return self.encoder.get(lowercase_ , self.encoder[self.unk_token] ) def __UpperCamelCase ( 
self : int , lowercase_ : int ) -> str: return self.decoder.get(lowercase_ , self.unk_token ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> str: lowercase__ : Optional[int] = [] lowercase__ : List[str] = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: lowercase__ : List[Any] = self.sp_model.decode(lowercase_ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " lowercase__ : Tuple = [] else: current_sub_tokens.append(lowercase_ ) lowercase__ : List[Any] = self.sp_model.decode(lowercase_ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : Dict=None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) lowercase__ : int = [1] * len(self.prefix_tokens ) lowercase__ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones def __UpperCamelCase ( self : str ) -> Dict: lowercase__ : int = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Dict: lowercase__ : Optional[Any] = self.__dict__.copy() lowercase__ : Any = None return state def __setstate__( self : List[Any] , lowercase_ : Dict ) -> None: lowercase__ : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase__ : Optional[int] = {} lowercase__ : Tuple = load_spm(self.spm_file , self.sp_model_kwargs ) def __UpperCamelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]: lowercase__ : Any = Path(lowercase_ ) assert save_dir.is_dir(), F'''{save_directory} should be a directory''' lowercase__ : str = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) lowercase__ : Dict = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , lowercase_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowercase_ ) elif not os.path.isfile(self.spm_file ): with open(lowercase_ , "wb" ) as fi: lowercase__ : Any = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (str(lowercase_ ), str(lowercase_ )) def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any]): lowercase__ : List[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase) spm.Load(str(_lowerCamelCase)) return spm def lowercase_ ( _lowerCamelCase : str): with open(_lowerCamelCase , "r") as f: return json.load(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str): with open(_lowerCamelCase , "w") as f: json.dump(_lowerCamelCase , _lowerCamelCase , indent=2)
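A short usage sketch for the tokenizer implemented above, going through the released transformers class and the checkpoint already referenced in this sample; the vocab and sentencepiece files are downloaded on first use.

from transformers import Speech2TextTokenizer

# Round-trip a sentence through the Speech2Text tokenizer.
tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))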
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


UpperCamelCase = 4
UpperCamelCase = 3


class snake_case_ ( __A ):
    pass


def lowercase_ ( _lowerCamelCase : List[str]):
    for shard in shards:
        for i in range(_lowerCamelCase):
            yield {"i": i, "shard": shard}


def lowercase_ ( ):
    lowercase__ : List[str] = int(os.environ["RANK"])
    lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
    lowercase__ : Union[str, Any] = ArgumentParser()
    parser.add_argument("--streaming" , type=_lowerCamelCase)
    parser.add_argument("--local_rank" , type=_lowerCamelCase)
    parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
    lowercase__ : int = parser.parse_args()
    lowercase__ : Union[str, Any] = args.streaming
    lowercase__ : List[Any] = args.num_workers
    lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
    lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
    if not streaming:
        lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
    lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
    lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
    lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    lowercase__ : Any = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    lowercase__ : List[str] = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')


if __name__ == "__main__":
    main()
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[int] ) -> Tuple: lowercase__ : str = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def __UpperCamelCase ( self : Any ) -> Optional[Any]: lowercase__ : Dict = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def __UpperCamelCase ( self : Tuple ) -> Tuple: lowercase__ : Union[str, Any] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_ ) ) def __UpperCamelCase ( self : Tuple ) -> Any: lowercase__ : Optional[Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def __UpperCamelCase ( self : Tuple ) -> str: lowercase__ : Optional[Any] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(lowercase_ ) ) def __UpperCamelCase ( self : Dict ) -> str: lowercase__ : Any = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] lowercase__ : Any = "fp16" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : Optional[int] = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] lowercase__ : Optional[int] = "fp16" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: # pass variant but use the non-variant filenames lowercase__ : List[Any] = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] lowercase__ : List[Any] = "fp16" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: lowercase__ : Tuple = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowercase__ : str = "fp16" self.assertFalse(is_safetensors_compatible(lowercase_ , 
variant=lowercase_ ) ) def __UpperCamelCase ( self : str ) -> str: lowercase__ : Optional[int] = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] lowercase__ : List[Any] = "fp16" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> int: # pass variant but use the non-variant filenames lowercase__ : Union[str, Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] lowercase__ : Any = "fp16" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowercase__ : str = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] lowercase__ : List[str] = "fp16" self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) )
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class snake_case_ ( __A ): __A : List[str] = "unispeech" def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) lowercase__ : List[str] = hidden_size lowercase__ : Any = feat_extract_norm lowercase__ : Optional[Any] = feat_extract_activation lowercase__ : Dict = list(lowercase_ ) lowercase__ : Union[str, Any] = list(lowercase_ ) lowercase__ : List[str] = list(lowercase_ ) lowercase__ : List[str] = conv_bias lowercase__ : Any = num_conv_pos_embeddings lowercase__ : Dict = num_conv_pos_embedding_groups lowercase__ : int = len(self.conv_dim ) lowercase__ : str = num_hidden_layers lowercase__ : Any = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : int = num_attention_heads lowercase__ : Union[str, Any] = hidden_dropout lowercase__ : Any = attention_dropout lowercase__ : Union[str, Any] = activation_dropout lowercase__ : Any = feat_proj_dropout lowercase__ : str = final_dropout lowercase__ : int = layerdrop lowercase__ : Optional[int] = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : Any = num_ctc_classes lowercase__ : int = vocab_size lowercase__ : str = do_stable_layer_norm lowercase__ : Any = use_weighted_layer_sum lowercase__ : Dict = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ : List[Any] = apply_spec_augment lowercase__ : Dict = mask_time_prob lowercase__ : Tuple = mask_time_length lowercase__ : str = mask_time_min_masks lowercase__ : List[Any] = mask_feature_prob lowercase__ : int = mask_feature_length lowercase__ : Optional[int] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__ : Optional[int] = num_codevectors_per_group lowercase__ : List[str] = num_codevector_groups lowercase__ : Dict = contrastive_logits_temperature lowercase__ : Tuple = feat_quantizer_dropout lowercase__ : Any = num_negatives lowercase__ : Dict = codevector_dim lowercase__ : Tuple = proj_codevector_dim lowercase__ : List[str] = diversity_loss_weight # ctc loss lowercase__ : Tuple = ctc_loss_reduction lowercase__ : Dict = ctc_zero_infinity # pretraining loss lowercase__ : Optional[Any] = replace_prob @property def __UpperCamelCase ( self : Dict ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
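A minimal instantiation sketch for the configuration class above, using the released transformers names; this builds a randomly initialized model from the default hyperparameters rather than loading pretrained weights.

from transformers import UniSpeechConfig, UniSpeechModel

# All constructor arguments are optional and default to the values defined above.
config = UniSpeechConfig()
model = UniSpeechModel(config)
print(config.hidden_size, config.num_hidden_layers)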
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def lowercase_ ( _lowerCamelCase : Optional[int]): lowercase__ : int = model.config lowercase__ : Tuple = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) lowercase__ : str = MBartConfig( is_decoder=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , add_cross_attention=_lowerCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer) , scale_embedding=_lowerCamelCase , add_final_layer_norm=_lowerCamelCase , ) return encoder_config, decoder_config def lowercase_ ( _lowerCamelCase : Union[str, Any]): if "encoder.model" in name: lowercase__ : Optional[Any] = name.replace("encoder.model" , "encoder") if "decoder.model" in name: lowercase__ : Any = name.replace("decoder.model" , "decoder") if "patch_embed.proj" in name: lowercase__ : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: lowercase__ : Union[str, Any] = name.replace("patch_embed.norm" , "embeddings.norm") if name.startswith("encoder"): if "layers" in name: lowercase__ : int = "encoder." + name if "attn.proj" in name: lowercase__ : Dict = name.replace("attn.proj" , "attention.output.dense") if "attn" in name and "mask" not in name: lowercase__ : List[str] = name.replace("attn" , "attention.self") if "norm1" in name: lowercase__ : Dict = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("norm2" , "layernorm_after") if "mlp.fc1" in name: lowercase__ : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : int = name.replace("mlp.fc2" , "output.dense") if name == "encoder.norm.weight": lowercase__ : str = "encoder.layernorm.weight" if name == "encoder.norm.bias": lowercase__ : Union[str, Any] = "encoder.layernorm.bias" return name def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]): for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(_lowerCamelCase) if "qkv" in key: lowercase__ : Tuple = key.split(".") lowercase__ : int = int(key_split[3]) lowercase__ : List[Any] = int(key_split[5]) lowercase__ : Tuple = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowercase__ : Union[str, Any] = val[:dim, :] lowercase__ : Any = val[dim : dim * 2, :] lowercase__ : str = val[-dim:, :] else: lowercase__ : Tuple = val[:dim] lowercase__ : int = val[dim : dim * 2] lowercase__ : List[Any] = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: lowercase__ : Optional[Any] = val return orig_state_dict def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=False): # load original model lowercase__ : List[Any] = DonutModel.from_pretrained(_lowerCamelCase).eval() # load HuggingFace model lowercase__ , lowercase__ : Optional[Any] = 
get_configs(_lowerCamelCase) lowercase__ : Tuple = DonutSwinModel(_lowerCamelCase) lowercase__ : Optional[int] = MBartForCausalLM(_lowerCamelCase) lowercase__ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase) model.eval() lowercase__ : Tuple = original_model.state_dict() lowercase__ : int = convert_state_dict(_lowerCamelCase , _lowerCamelCase) model.load_state_dict(_lowerCamelCase) # verify results on scanned document lowercase__ : str = load_dataset("hf-internal-testing/example-documents") lowercase__ : str = dataset["test"][0]["image"].convert("RGB") lowercase__ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained(_lowerCamelCase , from_slow=_lowerCamelCase) lowercase__ : Tuple = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1]) lowercase__ : Optional[Any] = DonutProcessor(_lowerCamelCase , _lowerCamelCase) lowercase__ : Dict = processor(_lowerCamelCase , return_tensors="pt").pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": lowercase__ : Optional[int] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" lowercase__ : List[Any] = "When is the coffee break?" lowercase__ : List[Any] = task_prompt.replace("{user_input}" , _lowerCamelCase) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": lowercase__ : int = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: lowercase__ : Any = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": lowercase__ : Optional[Any] = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": lowercase__ : Union[str, Any] = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt lowercase__ : Any = "hello world" else: raise ValueError("Model name not supported") lowercase__ : Union[str, Any] = original_model.decoder.tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors="pt")[ "input_ids" ] lowercase__ : str = original_model.encoder.model.patch_embed(_lowerCamelCase) lowercase__ , lowercase__ : Any = model.encoder.embeddings(_lowerCamelCase) assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3) # verify encoder hidden states lowercase__ : Dict = original_model.encoder(_lowerCamelCase) lowercase__ : Union[str, Any] = model.encoder(_lowerCamelCase).last_hidden_state assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-2) # verify decoder hidden states lowercase__ : int = original_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase).logits lowercase__ : Tuple = model(_lowerCamelCase , decoder_input_ids=_lowerCamelCase).logits assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) processor.save_pretrained(_lowerCamelCase) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/")[-1] , commit_message="Update model") processor.push_to_hub("nielsr/" + model_name.split("/")[-1] , commit_message="Update model") if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''naver-clova-ix/donut-base-finetuned-docvqa''', required=False, type=str, help='''Name of the original model 
you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, required=False, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub.''', ) UpperCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def lowercase_ ( _lowerCamelCase : list):
    for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
        lowercase__ : int = False
        for j in range(_lowerCamelCase , 0 , -1):
            if unsorted[j] < unsorted[j - 1]:
                lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
                lowercase__ : List[str] = True
        for j in range(_lowerCamelCase):
            if unsorted[j] > unsorted[j + 1]:
                lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
                lowercase__ : Dict = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase = [int(item) for item in user_input.split(''',''')]
    print(f"{cocktail_shaker_sort(unsorted) = }")
UpperCamelCase = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]):
    # Return True if there is a node that has not been iterated.
    lowercase__ : List[str] = [False] * len(_lowerCamelCase)
    lowercase__ : Optional[Any] = [s]
    lowercase__ : int = True
    while queue:
        lowercase__ : Optional[Any] = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(_lowerCamelCase)
                lowercase__ : Union[str, Any] = True
                lowercase__ : List[Any] = u
    return visited[t]


def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]):
    lowercase__ : Union[str, Any] = [-1] * (len(_lowerCamelCase))
    lowercase__ : Optional[int] = 0
    lowercase__ : Optional[int] = []
    lowercase__ : Union[str, Any] = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
        lowercase__ : Any = float("Inf")
        lowercase__ : Optional[int] = sink
        while s != source:
            # Find the minimum value in select path
            lowercase__ : str = min(_lowerCamelCase , graph[parent[s]][s])
            lowercase__ : Dict = parent[s]
        max_flow += path_flow
        lowercase__ : Union[str, Any] = sink
        while v != source:
            lowercase__ : Optional[int] = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            lowercase__ : List[str] = parent[v]
    for i in range(len(_lowerCamelCase)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class snake_case_ ( __A ): __A : int = "token-classification" def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]: if type(lowercase_ ) == dict: lowercase__ : Dict = Namespace(**lowercase_ ) lowercase__ : str = import_module("tasks" ) try: lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type ) lowercase__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels ) lowercase__ : int = CrossEntropyLoss().ignore_index super().__init__(lowercase_ , len(self.labels ) , self.mode ) def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any: return self.model(**lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple: lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : Tuple = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : Optional[int] = self(**lowercase_ ) lowercase__ : Union[str, Any] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: lowercase__ : Tuple = self.hparams for mode in ["train", "dev", "test"]: lowercase__ : Any = self._feature_file(lowercase_ ) if os.path.exists(lowercase_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ ) lowercase__ : Dict = self.token_classification_task.convert_examples_to_features( lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("Saving features into cached file %s" , lowercase_ ) torch.save(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader: lowercase__ : str = self._feature_file(lowercase_ ) logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) lowercase__ : List[str] = 
torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str: """Compute validation""" "" lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : int = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : List[Any] = self(**lowercase_ ) lowercase__ , lowercase__ : Any = outputs[:2] lowercase__ : Optional[Any] = logits.detach().cpu().numpy() lowercase__ : int = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]: lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean() lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 ) lowercase__ : Dict = np.argmax(lowercase_ , axis=2 ) lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowercase__ : Any = dict(enumerate(self.labels ) ) lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) lowercase__ : Any = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(lowercase_ , lowercase_ ), "precision": precision_score(lowercase_ , lowercase_ ), "recall": recall_score(lowercase_ , lowercase_ ), "f1": fa_score(lowercase_ , lowercase_ ), } lowercase__ : List[Any] = dict(results.items() ) lowercase__ : List[str] = results return ret, preds_list, out_label_list def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict: # when stable lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) lowercase__ : Any = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int: # updating to test_epoch_end instead of deprecated test_end lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 lowercase__ : Optional[int] = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple: # Add NER specific options BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ ) parser.add_argument( "--task_type" , default="NER" , 
type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" ) parser.add_argument( "--max_seq_length" , default=1_28 , type=lowercase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , ) parser.add_argument( "--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd()) UpperCamelCase = parser.parse_args() UpperCamelCase = NERTransformer(args) UpperCamelCase = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) UpperCamelCase = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
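# A hedged invocation sketch. Only --task_type, --max_seq_length, --labels, --gpus and
# --overwrite_cache are defined in this file; the remaining flags come from
# add_generic_args/BaseTransformer in lightning_base, so their names here, like the
# file name "run_ner.py", are assumptions.
import subprocess

subprocess.run(
    [
        "python",
        "run_ner.py",
        "--data_dir", "./germeval2014",
        "--model_name_or_path", "bert-base-multilingual-cased",
        "--output_dir", "./ner-model",
        "--task_type", "NER",
        "--max_seq_length", "128",
        "--gpus", "1",
        "--do_train",
    ],
    check=True,
)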
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCamelCase = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase_ ( _lowerCamelCase : Optional[int]): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]): if args.student_type == "roberta": lowercase__ : Dict = False elif args.student_type == "gpt2": lowercase__ : str = False def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]): if args.student_type == "roberta": lowercase__ : Union[str, Any] = False def lowercase_ ( ): lowercase__ : str = argparse.ArgumentParser(description="Training") parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument( "--data_file" , type=_lowerCamelCase , required=_lowerCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_lowerCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=_lowerCamelCase , help="The student type (DistilBERT, RoBERTa)." 
, ) parser.add_argument("--student_config" , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights" , default=_lowerCamelCase , type=_lowerCamelCase , help="Load student initialization checkpoint.") parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_lowerCamelCase , help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="The teacher model.") parser.add_argument("--temperature" , default=2.0 , type=_lowerCamelCase , help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce" , default=0.5 , type=_lowerCamelCase , help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument( "--alpha_mlm" , default=0.0 , type=_lowerCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=_lowerCamelCase , help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse" , default=0.0 , type=_lowerCamelCase , help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos" , default=0.0 , type=_lowerCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=_lowerCamelCase , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_lowerCamelCase , help="Proportion of tokens to mask out.") parser.add_argument("--word_keep" , default=0.1 , type=_lowerCamelCase , help="Proportion of tokens to keep.") parser.add_argument("--word_rand" , default=0.1 , type=_lowerCamelCase , help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_lowerCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_lowerCamelCase , help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=_lowerCamelCase , default=3 , help="Number of pass on the whole dataset.") parser.add_argument("--batch_size" , type=_lowerCamelCase , default=5 , help="Batch size (for each process).") parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCamelCase , default=50 , help="Gradient accumulation for larger training batches." 
, ) parser.add_argument("--warmup_prop" , default=0.05 , type=_lowerCamelCase , help="Linear warmup proportion.") parser.add_argument("--weight_decay" , default=0.0 , type=_lowerCamelCase , help="Weight decay if we apply some.") parser.add_argument("--learning_rate" , default=5E-4 , type=_lowerCamelCase , help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon" , default=1E-6 , type=_lowerCamelCase , help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm" , default=5.0 , type=_lowerCamelCase , help="Max gradient norm.") parser.add_argument("--initializer_range" , default=0.02 , type=_lowerCamelCase , help="Random initialization range.") parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_lowerCamelCase , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_lowerCamelCase , default=1 , help="Number of GPUs in the node.") parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="Distributed training - Local rank") parser.add_argument("--seed" , type=_lowerCamelCase , default=56 , help="Random seed") parser.add_argument("--log_interval" , type=_lowerCamelCase , default=500 , help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval" , type=_lowerCamelCase , default=4000 , help="Checkpoint interval.") lowercase__ : List[str] = parser.parse_args() sanity_checks(_lowerCamelCase) # ARGS # init_gpu_params(_lowerCamelCase) set_seed(_lowerCamelCase) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' " itUse `--force` if you want to overwrite it") else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''') # SAVE PARAMS # logger.info(f'''Param: {args}''') with open(os.path.join(args.dump_path , "parameters.json") , "w") as f: json.dump(vars(_lowerCamelCase) , _lowerCamelCase , indent=4) git_log(args.dump_path) lowercase__ , lowercase__ , lowercase__ : int = MODEL_CLASSES[args.student_type] lowercase__ , lowercase__ , lowercase__ : List[Any] = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowercase__ : Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name) lowercase__ : Any = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowercase__ : Tuple = tokenizer.all_special_tokens.index(_lowerCamelCase) lowercase__ : str = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''') lowercase__ : List[Any] = special_tok_ids lowercase__ : List[str] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file , "rb") as fp: lowercase__ : Tuple = pickle.load(_lowerCamelCase) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''') with open(args.token_counts , "rb") as fp: lowercase__ : Optional[Any] = pickle.load(_lowerCamelCase) lowercase__ : List[Any] = np.maximum(_lowerCamelCase , 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowercase__ : Union[str, Any] = 0.0 # do not 
predict special tokens lowercase__ : List[str] = torch.from_numpy(_lowerCamelCase) else: lowercase__ : Any = None lowercase__ : List[str] = LmSeqsDataset(params=_lowerCamelCase , data=_lowerCamelCase) logger.info("Data loader created.") # STUDENT # logger.info(f'''Loading student config from {args.student_config}''') lowercase__ : Dict = student_config_class.from_pretrained(args.student_config) lowercase__ : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''') lowercase__ : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowerCamelCase) else: lowercase__ : str = student_model_class(_lowerCamelCase) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''') logger.info("Student loaded.") # TEACHER # lowercase__ : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowerCamelCase) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''') logger.info(f'''Teacher loaded from {args.teacher_name}.''') # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowerCamelCase , _lowerCamelCase) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowerCamelCase , _lowerCamelCase) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowercase__ : Union[str, Any] = Distiller( params=_lowerCamelCase , dataset=_lowerCamelCase , token_probs=_lowerCamelCase , student=_lowerCamelCase , teacher=_lowerCamelCase) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
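# A hedged invocation sketch using only flags defined above; the file name "train.py"
# and the data/config paths are assumptions. Note the sanity checks enforced by the
# script: with --mlm set, alpha_mlm must be > 0 and alpha_clm must be 0.
import subprocess

subprocess.run(
    [
        "python", "train.py",
        "--force",
        "--dump_path", "serialization_dir/my_first_distillation",
        "--data_file", "data/binarized_text.bert-base-uncased.pickle",
        "--token_counts", "data/token_counts.bert-base-uncased.pickle",
        "--student_type", "distilbert",
        "--student_config", "training_configs/distilbert-base-uncased.json",
        "--teacher_type", "bert",
        "--teacher_name", "bert-base-uncased",
        "--mlm",
        "--alpha_ce", "5.0",
        "--alpha_mlm", "2.0",
        "--alpha_clm", "0.0",
    ],
    check=True,
)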
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
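# A usage sketch: with the lazy module wired into sys.modules, importing a class from
# the top-level package only materializes the submodule that defines it (assumes torch
# and vision are available).
from transformers import Mask2FormerConfig, Mask2FormerForUniversalSegmentation

config = Mask2FormerConfig()
model = Mask2FormerForUniversalSegmentation(config)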
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCamelCase = logging.get_logger(__name__) logging.set_verbosity_info() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : str): if "xprophetnet" in prophetnet_checkpoint_path: lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCamelCase) lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained( _lowerCamelCase , output_loading_info=_lowerCamelCase) else: lowercase__ : str = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCamelCase) lowercase__ , lowercase__ : Union[str, Any] = ProphetNetForConditionalGeneration.from_pretrained( _lowerCamelCase , output_loading_info=_lowerCamelCase) lowercase__ : Union[str, Any] = ["key_proj", "value_proj", "query_proj"] lowercase__ : int = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: lowercase__ : Any = key.split(".") if attributes[0] == "lm_head": lowercase__ : Any = prophet lowercase__ : List[Any] = prophet_old else: lowercase__ : Any = prophet.prophetnet lowercase__ : List[Any] = prophet_old.model lowercase__ : Dict = False for attribute in attributes: if attribute in mapping: lowercase__ : Tuple = mapping[attribute] if not hasattr(_lowerCamelCase , _lowerCamelCase) and len(_lowerCamelCase) > 0: lowercase__ : str = attribute elif hasattr(_lowerCamelCase , _lowerCamelCase): lowercase__ : Optional[int] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase__ : List[str] = old_model.weight logger.info(f'''{attribute} is initialized.''') lowercase__ : str = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowercase__ : Dict = old_model.bias logger.info(f'''{attribute} is initialized''') lowercase__ : List[str] = True break elif attribute in special_keys and hasattr(_lowerCamelCase , "in_proj_weight"): lowercase__ : str = old_model.in_proj_weight.shape[0] // 3 lowercase__ : Tuple = getattr(_lowerCamelCase , _lowerCamelCase) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :]) lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim]) elif attribute == "key_proj": lowercase__ : Optional[int] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :]) lowercase__ : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim]) elif attribute == "value_proj": lowercase__ : Any = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :]) lowercase__ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :]) lowercase__ : List[Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :]) lowercase__ : Union[str, Any] = True break if attribute.isdigit(): lowercase__ : Any = model[int(_lowerCamelCase)] lowercase__ : str = old_model[int(_lowerCamelCase)] else: lowercase__ : int = getattr(_lowerCamelCase , _lowerCamelCase) if old_attribute == "": lowercase__ : Dict = old_model else: if not hasattr(_lowerCamelCase , _lowerCamelCase): raise ValueError(f'''{old_model} does not have {old_attribute}''') lowercase__ : Dict = getattr(_lowerCamelCase , _lowerCamelCase) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''') print(f'''Saving model to {pytorch_dump_folder_path}''') prophet.save_pretrained(_lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
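# A hedged usage sketch, calling the conversion function defined above directly. Per
# the comment at the top of the script, old-format checkpoints live under
# "patrickvonplaten/..._old"; the exact name below is a placeholder.
convert_prophetnet_checkpoint_to_pytorch(
    "patrickvonplaten/prophetnet-large-uncased_old",  # prophetnet_checkpoint_path (placeholder)
    "./prophetnet-large-uncased",  # pytorch_dump_folder_path
)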
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
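# Quick sanity checks on the building blocks (illustrative only): the sigmoid is 0.5
# at zero, and a near-perfect prediction has near-zero log loss.
import numpy as np

assert sigmoid_function(0) == 0.5
assert cost_function(np.array([0.999999]), np.array([1.0])) < 1e-5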
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
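# The classic examples: "daBcd" -> drop d, capitalize a, keep B, capitalize c, drop d
# -> "ABC"; "dBcd" cannot reach "ABC" because no letter can produce the leading A.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False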
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # The dataclass is frozen, so work on a deep copy and write through __dict__.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
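# A usage sketch: aligning the template with a dataset's features swaps the generic
# ClassLabel placeholder in label_schema for the concrete label feature. The label
# names are illustrative.
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
assert aligned.label_schema["labels"].names == ["neg", "pos"]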
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class snake_case_ ( __A ): __A : Optional[int] = ["image_processor", "tokenizer"] __A : int = "BlipImageProcessor" __A : Any = "AutoTokenizer" def __init__( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ) -> Optional[int]: super().__init__(lowercase_ , lowercase_ ) # add QFormer tokenizer lowercase__ : Union[str, Any] = qformer_tokenizer def __call__( self : int , lowercase_ : ImageInput = None , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : List[str] , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) lowercase__ : Optional[int] = BatchFeature() if text is not None: lowercase__ : int = self.tokenizer( text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) encoding.update(lowercase_ ) lowercase__ : str = self.qformer_tokenizer( text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) lowercase__ : int = qformer_text_encoding.pop("input_ids" ) lowercase__ : str = qformer_text_encoding.pop("attention_mask" ) if images is not None: lowercase__ : Any = self.image_processor(lowercase_ , return_tensors=lowercase_ ) encoding.update(lowercase_ ) return encoding def __UpperCamelCase ( self : int , *lowercase_ : Tuple , **lowercase_ : Optional[int] ) -> Tuple: return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: lowercase__ : Union[str, Any] = self.tokenizer.model_input_names lowercase__ : Optional[Any] = self.image_processor.model_input_names return 
list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def __UpperCamelCase ( self : Dict , lowercase_ : Any , **lowercase_ : Optional[Any] ) -> Optional[Any]: if os.path.isfile(lowercase_ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) lowercase__ : Optional[Any] = os.path.join(lowercase_ , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(lowercase_ ) return super().save_pretrained(lowercase_ , **lowercase_ ) @classmethod def __UpperCamelCase ( cls : Any , lowercase_ : List[str] , **lowercase_ : Any ) -> Any: lowercase__ : Optional[int] = AutoTokenizer.from_pretrained(lowercase_ , subfolder="qformer_tokenizer" ) lowercase__ : Dict = cls._get_arguments_from_pretrained(lowercase_ , **lowercase_ ) args.append(lowercase_ ) return cls(*lowercase_ )
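# A hedged usage sketch; in transformers this processor ships as InstructBlipProcessor.
# The checkpoint name is real, but the dummy image and prompt are illustrative.
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy black image
inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
# Alongside the usual pixel_values/input_ids, the QFormer tokenizer contributes its
# own qformer_input_ids and qformer_attention_mask.
print(sorted(inputs.keys()))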
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_a: int, number_b: int) -> int:
    return int((number_a + number_b) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
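# Non-interactive example: binary-search guessing 42 between 0 and 100 prints
#   started...
#   guess the number : 42
#   details : [50, 25, 37, 43, 40, 41, 42]
guess_the_number(0, 100, 42)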
def matching_min_vertex_cover(graph: dict) -> set:
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then discard every edge adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
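# Illustrative run on the commented example graph; which endpoints get chosen depends
# on set.pop() order, so we check the covering property rather than a fixed set.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u, v in get_edges(graph))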
import os import re import shutil import sys import tempfile import unittest import black UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. UpperCamelCase = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : str ) -> List[str]: lowercase__ : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) ) lowercase__ : List[Any] = self.diffusers_dir shutil.copy( os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , ) def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : Dict = "src/diffusers" shutil.rmtree(self.diffusers_dir ) def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple: lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ ) lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" ) with open(lowercase_ , "w" , newline="\n" ) as f: f.write(lowercase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowercase_ ) with open(lowercase_ , "r" ) as f: self.assertTrue(f.read() , lowercase_ ) def __UpperCamelCase ( self : str ) -> Optional[int]: lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" ) self.assertEqual(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : int ) -> str: # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , ) # Copy consistency with a really long name lowercase__ : Optional[int] = 
"TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple): for param, grad_param in zip(model_a.parameters() , model_b.parameters()): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True): model.train() lowercase__ : Tuple = model(_lowerCamelCase) lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device)) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False): set_seed(42) lowercase__ : Dict = RegressionModel() lowercase__ : int = deepcopy(_lowerCamelCase) lowercase__ : str = RegressionDataset(length=80) lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16) model.to(accelerator.device) if sched: lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3) lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3) lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) # Make a copy of `model` if sched: lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowercase_ ( _lowerCamelCase : Tuple): # Test when on a single CPU or GPU that the context manager does nothing lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): 
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Any): # Test on distributed setup that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False): lowercase__ : int = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : str = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase 
, _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))] GradientState._reset_state() def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False): lowercase__ : Dict = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : Any = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)): if split_batches: sched.step() else: for _ in range(accelerator.num_processes): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase)) if accelerator.num_processes > 1: check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) GradientState._reset_state() def lowercase_ ( ): lowercase__ : List[str] = Accelerator() lowercase__ : List[Any] = RegressionDataset(length=80) lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ : int = RegressionDataset(length=96) lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if iteration < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, 
_ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if batch_num < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowercase_ ( ): lowercase__ : str = Accelerator() lowercase__ : Dict = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**") test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**") test_noop_sync(_lowerCamelCase) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**") test_distributed_sync(_lowerCamelCase) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
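# A hedged launch sketch: this test module is meant to run under the Accelerate
# launcher so the distributed branches are exercised. The file name "test_sync.py"
# is an assumption.
import subprocess

subprocess.run(["accelerate", "launch", "--num_processes", "2", "test_sync.py"], check=True)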
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class snake_case_ ( __A ): __A : int = VOCAB_FILES_NAMES __A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __A : Union[str, Any] = ["input_ids", "attention_mask"] __A : Any = None def __init__( self : Tuple , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=None , lowercase_ : Dict="<unk>" , lowercase_ : Optional[Any]="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : Dict=False , lowercase_ : Tuple=False , **lowercase_ : Tuple , ) -> Tuple: super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , add_prefix_space=lowercase_ , clean_up_tokenization_spaces=lowercase_ , **lowercase_ , ) lowercase__ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space: lowercase__ : Optional[int] = getattr(lowercase_ , pre_tok_state.pop("type" ) ) lowercase__ : Dict = add_prefix_space lowercase__ : Optional[Any] = pre_tok_class(**lowercase_ ) lowercase__ : Dict = add_prefix_space def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : int ) -> BatchEncoding: lowercase__ : Union[str, Any] = kwargs.get("is_split_into_words" , lowercase_ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Any , *lowercase_ : Dict , **lowercase_ : List[Any] ) -> BatchEncoding: lowercase__ : List[Any] = kwargs.get("is_split_into_words" , lowercase_ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." 
) return super()._encode_plus(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]: lowercase__ : Tuple = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]: lowercase__ : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] ) if len(lowercase_ ) > self.model_max_length: lowercase__ : Tuple = input_ids[-self.model_max_length :] return input_ids
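For reference, a short usage sketch of the fast Bloom tokenizer defined above; the checkpoint name is taken from the pretrained map at the top of the file:

```python
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")

# Bloom uses a byte-level BPE, so encode/decode round-trips are lossless.
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded.input_ids.shape)
print(tokenizer.decode(encoded.input_ids[0]))
```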
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str): lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase) lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase) lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase) lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": lowercase__ : Any = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": lowercase__ : int = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Dict = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global].") # Encoder for layer_index in range(config.num_layers): lowercase__ : str = f'''layers_{str(_lowerCamelCase)}''' # Self-Attention lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"] lowercase__ : Any = tax_attention_key lowercase__ : Any = tax_attention_out lowercase__ : Any = tax_attention_query lowercase__ : List[str] = tax_attention_value lowercase__ : List[str] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Any = tax_global_layer_norm if split_mlp_wi: lowercase__ : Tuple = tax_mlp_wi_a lowercase__ : str = tax_mlp_wi_a else: lowercase__ : List[Any] = tax_mlp_wi lowercase__ : str = tax_mlp_wo lowercase__ : int = tax_mlp_layer_norm lowercase__ : List[str] = flax_model_encoder_layer_block # Only for layer 0: lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T lowercase__ : Optional[int] = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T 
lowercase__ : str = tax_encoder_global_rel_embedding # Assigning lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"] lowercase__ : Union[str, Any] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers): lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}''' # Self-Attention lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"] lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"] lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"] lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"] # Layer Normalization lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"] lowercase__ : Any = tax_attention_key lowercase__ : List[Any] = tax_attention_out lowercase__ : Any = tax_attention_query lowercase__ : List[Any] = tax_attention_value lowercase__ : List[str] = tax_pre_attention_layer_norm lowercase__ : List[Any] = tax_enc_dec_attention_key lowercase__ : Optional[Any] = tax_enc_dec_attention_out lowercase__ : str = tax_enc_dec_attention_query lowercase__ : Union[str, Any] = tax_enc_dec_attention_value lowercase__ : Tuple = tax_cross_layer_norm if split_mlp_wi: lowercase__ : List[str] = tax_mlp_wi_a lowercase__ : List[Any] = tax_mlp_wi_a else: lowercase__ : Tuple = tax_mlp_wi lowercase__ : Any = tax_mlp_wo lowercase__ : Tuple = txa_mlp_layer_norm lowercase__ : int = flax_model_decoder_layer_block # Decoder Normalization lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"] lowercase__ : List[Any] = txa_decoder_norm # Only for layer 0: lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T lowercase__ : str = tax_decoder_rel_embedding # Token Embeddings lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"] lowercase__ : Optional[Any] = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(_lowerCamelCase) print("T5X Model was sucessfully converted!") if __name__ == "__main__": 
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # `convert_tax_checkpoint_to_flax` is the conversion function defined above
    # (its def statement was obfuscated to `lowercase_`).
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
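Once the script has run, the dump folder is an ordinary Flax checkpoint. A reload sketch, assuming the un-obfuscated transformers class name (`FlaxAutoModelForSeq2SeqLM`) and a placeholder path:

```python
from transformers import FlaxAutoModelForSeq2SeqLM

# Placeholder path: whatever was passed as --flax_dump_folder_path above.
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("/path/to/flax_dump_folder")
print(model.config.model_type)  # "t5" or "longt5"
```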
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults: the attention width falls back to the hidden size,
        # and the feed-forward width to 4x the hidden size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
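A quick sketch of how this config behaves, including the `attribute_map` alias declared on the class (values are the `__init__` defaults; assumes an installed transformers version that ships `RwkvConfig`):

```python
from transformers import RwkvConfig

config = RwkvConfig()

# attribute_map aliases max_position_embeddings to context_length.
assert config.max_position_embeddings == config.context_length == 1024

# Unset widths fall back to the derived defaults from __init__.
assert config.attention_hidden_size == config.hidden_size == 4096
assert config.intermediate_size == 4 * config.hidden_size
```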
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
class snake_case_ : def __init__( self : int ) -> Optional[int]: lowercase__ : Optional[int] = 0 lowercase__ : List[str] = 0 lowercase__ : Any = {} def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]: if vertex not in self.adjacency: lowercase__ : List[Any] = {} self.num_vertices += 1 def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]: self.add_vertex(lowercase_ ) self.add_vertex(lowercase_ ) if head == tail: return lowercase__ : int = weight lowercase__ : Any = weight def __UpperCamelCase ( self : Dict ) -> Optional[int]: lowercase__ : List[Any] = self.get_edges() for edge in edges: lowercase__ , lowercase__ , lowercase__ : int = edge edges.remove((tail, head, weight) ) for i in range(len(lowercase_ ) ): lowercase__ : Tuple = list(edges[i] ) edges.sort(key=lambda lowercase_ : e[2] ) for i in range(len(lowercase_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowercase__ : int = edges[i][2] + 1 for edge in edges: lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge lowercase__ : Union[str, Any] = weight lowercase__ : Dict = weight def __str__( self : str ) -> Any: lowercase__ : str = "" for tail in self.adjacency: for head in self.adjacency[tail]: lowercase__ : Optional[Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Any = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __UpperCamelCase ( self : List[str] ) -> Dict: return self.adjacency.keys() @staticmethod def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]: lowercase__ : Any = Graph() if vertices is None: lowercase__ : str = [] if edges is None: lowercase__ : List[Any] = [] for vertex in vertices: g.add_vertex(lowercase_ ) for edge in edges: g.add_edge(*lowercase_ ) return g class snake_case_ : def __init__( self : int ) -> List[str]: lowercase__ : Dict = {} lowercase__ : Tuple = {} def __len__( self : Union[str, Any] ) -> Union[str, Any]: return len(self.parent ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple: if item in self.parent: return self.find(lowercase_ ) lowercase__ : Union[str, Any] = item lowercase__ : int = 0 return item def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any: if item not in self.parent: return self.make_set(lowercase_ ) if item != self.parent[item]: lowercase__ : Union[str, Any] = self.find(self.parent[item] ) return self.parent[item] def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]: lowercase__ : Dict = self.find(lowercase_ ) lowercase__ : Optional[int] = self.find(lowercase_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowercase__ : Dict = roota return roota if self.rank[roota] < self.rank[roota]: lowercase__ : int = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowercase__ : Tuple = roota return roota return None @staticmethod def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]: lowercase__ : List[Any] = graph.num_vertices lowercase__ : Optional[Any] = Graph.UnionFind() lowercase__ : int = [] while num_components > 1: lowercase__ : List[Any] = {} for vertex in graph.get_vertices(): lowercase__ : Any = -1 lowercase__ : List[str] = graph.get_edges() for edge in edges: lowercase__ 
, lowercase__ , lowercase__ : str = edge edges.remove((tail, head, weight) ) for edge in edges: lowercase__ , lowercase__ , lowercase__ : List[str] = edge lowercase__ : List[str] = union_find.find(lowercase_ ) lowercase__ : Union[str, Any] = union_find.find(lowercase_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : Dict = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex] if union_find.find(lowercase_ ) != union_find.find(lowercase_ ): union_find.union(lowercase_ , lowercase_ ) mst_edges.append(cheap_edge[vertex] ) lowercase__ : Optional[Any] = num_components - 1 lowercase__ : List[Any] = Graph.build(edges=lowercase_ ) return mst
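The static method above is Borůvka's minimum-spanning-tree algorithm: each round picks the cheapest edge leaving every component, then merges along those edges via union-find. Since the class is heavily obfuscated, here is a de-obfuscated minimal sketch of the same idea (new names, not from the file):

```python
def boruvka_mst(num_vertices, edges):
    """edges: list of (u, v, weight). Assumes a connected graph with
    distinct weights, so the cheapest-edge rule cannot form a cycle."""
    parent = list(range(num_vertices))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    components = num_vertices
    while components > 1:
        # Cheapest edge leaving each current component.
        cheapest = [None] * num_vertices
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or w < cheapest[ru][2]:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or w < cheapest[rv][2]:
                    cheapest[rv] = (u, v, w)
        # Merge along each selected edge that still joins two components.
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                if find(u) != find(v):
                    parent[find(u)] = find(v)
                    mst.append(edge)
                    components -= 1
    return mst


print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)]))
# [(0, 1, 1), (1, 2, 2), (2, 3, 3)] -- total weight 6
```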
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : str): lowercase__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: lowercase__ : str = 1024 lowercase__ : List[str] = 4096 lowercase__ : List[Any] = 24 lowercase__ : Dict = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : Any = [256, 512, 1024, 1024] lowercase__ : Optional[int] = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Union[str, Any] = True lowercase__ : Tuple = 150 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : str = "ade20k-id2label.json" lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder") if "pretrained.model" in name: lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings") if "patch_embed" in name: lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings") if "pos_embed" in name: lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings") if "attn.proj" in name: lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense") if "proj" in name and "project" not in name: lowercase__ : int = name.replace("proj" , "projection") if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layer") if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if "norm1" in name: lowercase__ : List[str] = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "layernorm_after") if "scratch.output_conv" in name: lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head") if "scratch" in name: lowercase__ : str = name.replace("scratch" , "neck") if "layer1_rn" in name: lowercase__ : int = name.replace("layer1_rn" , "convs.0") if "layer2_rn" in name: lowercase__ : int = name.replace("layer2_rn" , "convs.1") if "layer3_rn" in name: lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2") if "layer4_rn" in name: lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3") if "refinenet" in name: lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''') if "out_conv" in 
name: lowercase__ : str = name.replace("out_conv" , "projection") if "resConfUnit1" in name: lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1") if "resConfUnit2" in name: lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2") if "conv1" in name: lowercase__ : List[Any] = name.replace("conv1" , "convolution1") if "conv2" in name: lowercase__ : Tuple = name.replace("conv2" , "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: lowercase__ : Any = name.replace("pretrained" , "dpt") if "bn" in name: lowercase__ : str = name.replace("bn" , "batch_norm") if "head" in name: lowercase__ : Optional[Any] = name.replace("head" , "head.head") if "encoder.norm" in name: lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm") if "auxlayer" in name: lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head") return name def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''') lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :] lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size] lowercase__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ 
-config.hidden_size :, : ] lowercase__ : int = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict): lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase) # load original state_dict from URL lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu") # remove certain keys remove_ignore_keys_(_lowerCamelCase) # rename keys for key in state_dict.copy().keys(): lowercase__ : List[str] = state_dict.pop(_lowerCamelCase) lowercase__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase) model.load_state_dict(_lowerCamelCase) model.eval() # Check outputs on an image lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384 lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase) lowercase__ : List[str] = prepare_img() lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt") # forward pass lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth # Assert logits lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(_lowerCamelCase) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase) ) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
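After conversion the checkpoint works with the standard transformers inference classes. A depth-estimation sketch; the hub id `Intel/dpt-large` is an assumption, and any folder written by this script can be used instead:

```python
import requests
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth
print(predicted_depth.shape)  # (1, H, W) at the processor's working resolution
```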
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 UpperCamelCase = { '''return_dict''': False, '''output_hidden_states''': True, '''output_attentions''': True, '''torchscript''': True, '''torch_dtype''': '''float16''', '''use_bfloat16''': True, '''tf_legacy_loss''': True, '''pruned_heads''': {'''a''': 1}, '''tie_word_embeddings''': False, '''is_decoder''': True, '''cross_attention_hidden_size''': 128, '''add_cross_attention''': True, '''tie_encoder_decoder''': True, '''max_length''': 50, '''min_length''': 3, '''do_sample''': True, '''early_stopping''': True, '''num_beams''': 3, '''num_beam_groups''': 3, '''diversity_penalty''': 0.5, '''temperature''': 2.0, '''top_k''': 10, '''top_p''': 0.7, '''typical_p''': 0.2, '''repetition_penalty''': 0.8, '''length_penalty''': 0.8, '''no_repeat_ngram_size''': 5, '''encoder_no_repeat_ngram_size''': 5, '''bad_words_ids''': [1, 2, 3], '''num_return_sequences''': 3, '''chunk_size_feed_forward''': 5, '''output_scores''': True, '''return_dict_in_generate''': True, '''forced_bos_token_id''': 2, '''forced_eos_token_id''': 3, '''remove_invalid_values''': True, '''architectures''': ['''BertModel'''], '''finetuning_task''': '''translation''', '''id2label''': {0: '''label'''}, '''label2id''': {'''label''': '''0'''}, '''tokenizer_class''': '''BertTokenizerFast''', '''prefix''': '''prefix''', '''bos_token_id''': 6, '''pad_token_id''': 7, '''eos_token_id''': 8, '''sep_token_id''': 9, '''decoder_start_token_id''': 10, '''exponential_decay_length_penalty''': (5, 1.01), '''suppress_tokens''': [0, 1], '''begin_suppress_tokens''': 2, '''task_specific_params''': {'''translation''': '''some_params'''}, '''problem_type''': '''regression''', } @is_staging_test class snake_case_ ( unittest.TestCase ): @classmethod def __UpperCamelCase ( cls : Any ) -> Dict: lowercase__ : int = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def __UpperCamelCase ( cls : Optional[Any] ) -> Optional[int]: try: delete_repo(token=cls._token , repo_id="test-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config" ) except HTTPError: pass def __UpperCamelCase ( self : Union[str, Any] ) -> str: lowercase__ : str = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("test-config" , use_auth_token=self._token ) lowercase__ : str = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , repo_id="test-config" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase__ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k 
!= "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def __UpperCamelCase ( self : Tuple ) -> str: lowercase__ : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token ) lowercase__ : Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id="valid_org/test-config-org" , push_to_hub=lowercase_ , use_auth_token=self._token ) lowercase__ : int = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) ) def __UpperCamelCase ( self : Any ) -> str: CustomConfig.register_for_auto_class() lowercase__ : Optional[Any] = CustomConfig(attribute=42 ) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} ) lowercase__ : List[Any] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=lowercase_ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig" ) self.assertEqual(new_config.attribute , 42 ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any] ) -> Any: lowercase__ : int = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated lowercase__ : Dict = c.n_embd + 1 # int lowercase__ : List[Any] = c.resid_pdrop + 1.0 # float lowercase__ : Tuple = not c.scale_attn_weights # bool lowercase__ : Optional[Any] = c.summary_type + "foo" # str c.update_from_string( F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowercase_ , c.n_embd , "mismatch for key: n_embd" ) self.assertEqual(lowercase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" ) self.assertEqual(lowercase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" ) self.assertEqual(lowercase_ , c.summary_type , "mismatch for key: summary_type" ) def __UpperCamelCase ( self : Dict ) -> Optional[int]: lowercase__ : List[str] = PretrainedConfig() lowercase__ : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowercase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) lowercase__ : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase_ , lowercase_ )] if len(lowercase_ ) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" F''' {", ".join(lowercase_ )}.''' ) def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: with self.assertRaises(lowercase_ ): # config is in subfolder, the following should not work without specifying the subfolder lowercase__ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ) lowercase__ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down lowercase__ : int = mock.Mock() lowercase__ : Tuple = 5_00 lowercase__ : str = {} lowercase__ : Optional[int] = HTTPError lowercase__ : List[Any] = {} # Download this model to make sure it's in the cache. lowercase__ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=lowercase_ ) as mock_head: lowercase__ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() def __UpperCamelCase ( self : Optional[int] ) -> Any: # This test is for deprecated behavior and can be removed in v5 lowercase__ : Optional[Any] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def __UpperCamelCase ( self : int ) -> List[str]: lowercase__ : Tuple = AutoConfig.from_pretrained("bert-base-cased" ) lowercase__ : str = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowercase_ ) lowercase__ : str = 2 json.dump(configuration.to_dict() , open(os.path.join(lowercase_ , "config.4.0.0.json" ) , "w" ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 lowercase__ : Dict = AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 lowercase__ : Dict = ["config.42.0.0.json"] lowercase__ : Union[str, Any] = 7_68 configuration.save_pretrained(lowercase_ ) shutil.move(os.path.join(lowercase_ , "config.4.0.0.json" ) , os.path.join(lowercase_ , "config.42.0.0.json" ) ) lowercase__ : Optional[int] = AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(new_configuration.hidden_size , 7_68 ) def __UpperCamelCase ( self : Any ) -> Tuple: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. 
lowercase__ : Any = "hf-internal-testing/test-two-configs" import transformers as new_transformers lowercase__ : List[Any] = "v4.0.0" lowercase__ , lowercase__ : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowercase_ , return_unused_kwargs=lowercase_ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowercase_ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers lowercase__ : List[Any] = "v3.0.0" lowercase__ : Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(old_configuration.hidden_size , 7_68 )
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d in [numerator, digit] for which numerator/d has
    the longest recurring cycle, by tracking long-division remainders until
    one repeats.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # A repeated remainder closes the cycle; its length is the
                # number of distinct remainders seen so far.
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
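With the defaults this solves Project Euler problem 26 (the d < 1000 whose unit fraction has the longest recurring decimal cycle); the published answer is 983. An illustrative check, not part of the original file:

```python
print(solution(1, 1000))  # 983 (1/983 has the longest recurring cycle)
```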
from typing import Dict, Optional import numpy as np import datasets UpperCamelCase = ''' IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. ''' UpperCamelCase = ''' Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric("mean_iou") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} ''' UpperCamelCase = '''\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }''' def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : bool , _lowerCamelCase : Optional[Dict[int, int]] = None , _lowerCamelCase : bool = False , ): if label_map is not None: for old_id, new_id in label_map.items(): lowercase__ : Union[str, Any] = new_id # turn into Numpy arrays lowercase__ : Union[str, Any] = np.array(_lowerCamelCase) lowercase__ : Optional[int] = np.array(_lowerCamelCase) if reduce_labels: lowercase__ : Tuple = 255 lowercase__ : Tuple = label - 1 lowercase__ : Optional[Any] = 255 lowercase__ : Union[str, Any] = label != ignore_index lowercase__ : str = np.not_equal(_lowerCamelCase , _lowerCamelCase) lowercase__ : Any = pred_label[mask] lowercase__ : Union[str, Any] = np.array(_lowerCamelCase)[mask] lowercase__ : Optional[int] = pred_label[pred_label == label] lowercase__ : Dict = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1))[0] lowercase__ : Any = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1))[0] lowercase__ : Union[str, Any] = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1))[0] lowercase__ : Tuple = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : bool , _lowerCamelCase : Optional[Dict[int, int]] = None , _lowerCamelCase : bool = False , ): lowercase__ : Dict = np.zeros((num_labels,) , dtype=np.floataa) lowercase__ : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa) lowercase__ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa) lowercase__ : List[str] = np.zeros((num_labels,) , dtype=np.floataa) for result, gt_seg_map in zip(_lowerCamelCase , _lowerCamelCase): lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = intersect_and_union( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : bool , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[Dict[int, int]] = None , _lowerCamelCase : bool = False , ): lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = total_intersect_and_union( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # compute metrics lowercase__ : Any = {} lowercase__ : List[Any] = total_area_intersect.sum() / total_area_label.sum() lowercase__ : str = total_area_intersect / total_area_union lowercase__ : List[Any] = total_area_intersect / total_area_label lowercase__ : List[Any] = np.nanmean(_lowerCamelCase) lowercase__ : Union[str, Any] = np.nanmean(_lowerCamelCase) lowercase__ : Tuple = all_acc lowercase__ : Optional[Any] = iou lowercase__ : Dict = acc if nan_to_num is not None: 
lowercase__ : Union[str, Any] = {metric: np.nan_to_num(_lowerCamelCase , nan=_lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): def __UpperCamelCase ( self : List[Any] ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) , reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] , ) def __UpperCamelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : bool , lowercase_ : Optional[int] = None , lowercase_ : Optional[Dict[int, int]] = None , lowercase_ : bool = False , ) -> Optional[Any]: lowercase__ : Dict = mean_iou( results=lowercase_ , gt_seg_maps=lowercase_ , num_labels=lowercase_ , ignore_index=lowercase_ , nan_to_num=lowercase_ , label_map=lowercase_ , reduce_labels=lowercase_ , ) return iou_result
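The core of `intersect_and_union` above is a histogram trick: per-class intersection counts come from histogramming the predicted labels at exactly the pixels where prediction and ground truth agree. A standalone numpy sketch on two tiny maps:

```python
import numpy as np

pred = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [0, 1]])
num_labels = 2

# Predicted labels at correctly classified pixels, counted per class.
area_intersect = np.histogram(pred[pred == label], bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_label - area_intersect

print(area_intersect / area_union)  # per-class IoU: [0.5, 0.667]
```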
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ): __A : int = StableUnCLIPPipeline __A : int = TEXT_TO_IMAGE_PARAMS __A : Any = TEXT_TO_IMAGE_BATCH_PARAMS __A : int = TEXT_TO_IMAGE_IMAGE_PARAMS __A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __A : int = False def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : str = 32 lowercase__ : Any = embedder_hidden_size # prior components torch.manual_seed(0 ) lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : List[str] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : Any = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , ) torch.manual_seed(0 ) lowercase__ : Union[str, Any] = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ ) lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : str = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0 ) lowercase__ : Any = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL() lowercase__ : List[Any] = { # prior components "prior_tokenizer": 
prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any: if str(lowercase_ ).startswith("mps" ): lowercase__ : Any = torch.manual_seed(lowercase_ ) else: lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : Union[str, Any] = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: lowercase__ : str = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowercase_ ) @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Tuple ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : int ) -> int: lowercase__ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" ) lowercase__ : Optional[int] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) lowercase__ : int = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : str = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) lowercase__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
333
1
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "): lowercase__ : Union[str, Any] = text.split(_lowerCamelCase) return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)] def lowercase_ ( _lowerCamelCase : dict): lowercase__ , lowercase__ : List[str] = [], [] for title, text in zip(documents["title"] , documents["text"]): if text is not None: for passage in split_text(_lowerCamelCase): titles.append(title if title is not None else "") texts.append(_lowerCamelCase) return {"title": titles, "text": texts} def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast): lowercase__ : Union[str, Any] = ctx_tokenizer( documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"] lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ): ###################################### logger.info("Step 1 - Create the dataset") ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase__ : str = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"]) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc) # And compute the embeddings lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase) lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) lowercase__ : List[Any] = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space lowercase__ : List[Any] = dataset.map( partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , ) # And finally save your dataset lowercase__ : Optional[int] = 
os.path.join(rag_example_args.output_dir , "my_knowledge_dataset") dataset.save_to_disk(_lowerCamelCase) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset") ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase) # And save the index lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss") dataset.get_index("embeddings").save(_lowerCamelCase) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class snake_case_ : __A : str = field( default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,) __A : Optional[str] = field( default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,) __A : str = field( default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,) __A : str = field( default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } ,) __A : Optional[str] = field( default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,) @dataclass class snake_case_ : __A : Optional[int] = field( default=__A ,metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } ,) __A : int = field( default=16 ,metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } ,) @dataclass class snake_case_ : __A : int = field( default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,) __A : int = field( default=128 ,metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } ,) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
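The script's closing comments note that both the dataset and the Faiss index can be reloaded later. Below is a minimal retrieval sketch under that assumption: paths are illustrative placeholders, the question encoder is the multiset DPR model matching the default context encoder, and the example question comes from the script's own argument help text.

# A minimal sketch of querying the artifacts saved above; paths are illustrative.
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk("my_knowledge_dataset")  # saved by save_to_disk above
dataset.load_faiss_index("embeddings", "my_knowledge_dataset_hnsw_index.faiss")

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")

question = "What does Moses' rod turn into ?"
inputs = q_tokenizer(question, return_tensors="pt")
question_embedding = q_encoder(**inputs).pooler_output[0].detach().numpy()

# Top-5 nearest passages under inner-product search on the HNSW index.
scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
print(passages["title"])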
333
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False): try: lowercase__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowercase_ ( _lowerCamelCase : int): return unittest.skip("Test was skipped")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
List[Any]=None , _lowerCamelCase : Dict=None): if test_case is None: return partial(_lowerCamelCase , version=_lowerCamelCase) return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase) class snake_case_ ( unittest.TestCase ): __A : int = True @classmethod def __UpperCamelCase ( cls : str ) -> str: lowercase__ : str = tempfile.mkdtemp() @classmethod def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCamelCase ( self : str ) -> Optional[int]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str: lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = AcceleratorState() lowercase__ : Optional[int] = tensor[None].clone().to(state.device) lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu() lowercase__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _lowerCamelCase): return False return True class snake_case_ : def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]: lowercase__ : int = returncode lowercase__ : Dict = stdout lowercase__ : List[Any] = stderr async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str): while True: lowercase__ : int = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Tuple = [] lowercase__ : List[Any] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True): lowercase__ : Optional[Any] = asyncio.get_event_loop() lowercase__ : List[Any] = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : str = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Dict = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False): try: lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_lowerCamelCase , "decode"): lowercase__ : Optional[Any] = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
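For reference, here is a self-contained sketch of the `RUN_SLOW`-style environment-flag parsing this module opens with. The function name mirrors the pattern rather than the obfuscated original, and the bool() wrapper around strtobool is a small liberty for clarity.

import os
from distutils.util import strtobool


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # A missing key falls back to the default; otherwise "yes"/"no", "1"/"0",
    # "true"/"false" and similar are accepted, and anything else raises.
    try:
        value = os.environ[key]
    except KeyError:
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")


os.environ["RUN_SLOW"] = "1"
print(parse_flag_from_env("RUN_SLOW", default=False))  # True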
333
1
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
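As a cross-check of the description above, a short sketch (assuming only numpy and scipy) that recovers the coefficient from its definition, covariance divided by the product of the standard deviations, on the docstring's own example data:

import numpy as np
from scipy.stats import pearsonr

x = np.array([10, 9, 2.5, 6, 4])
y = np.array([1, 2, 3, 4, 5])
# Population covariance over population standard deviations.
r_manual = np.cov(x, y, bias=True)[0, 1] / (x.std() * y.std())
r_scipy, p_value = pearsonr(x, y)
print(round(r_manual, 2), round(r_scipy, 2))  # both -0.74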
333
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : int = ["flax"] def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Optional[Any] = ["flax"] def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : 
Dict = ["flax"] def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Optional[int] = ["flax"] def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : int = ["flax"] def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[str] = ["flax"] def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]: requires_backends(cls , ["flax"] )
333
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
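A minimal usage sketch of the configuration class above, assuming it is importable from transformers as usual:

from transformers import DPRConfig

# Defaults mirror BERT-base; only the optional projection head is overridden here.
config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128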
333
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
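A short sketch of what the masking defaults above imply for a standard input:

from transformers import ViTMAEConfig

# With 224x224 inputs and 16-pixel patches the encoder sees 196 patches,
# of which mask_ratio=0.75 hides 147 during pre-training.
config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches, int(num_patches * config.mask_ratio))  # 196 147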
333
1
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case_ : def __init__( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict=13 , lowercase_ : str=3 , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : Dict=2_24 , lowercase_ : Dict=10_00 , lowercase_ : str=[3, 3, 6, 4] , lowercase_ : Dict=[48, 56, 1_12, 2_20] , ) -> Optional[Any]: lowercase__ : Tuple = parent lowercase__ : Any = batch_size lowercase__ : Any = num_channels lowercase__ : Optional[Any] = is_training lowercase__ : Optional[int] = use_labels lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Optional[Any] = num_labels lowercase__ : Tuple = image_size lowercase__ : int = layer_depths lowercase__ : Tuple = embed_dims def __UpperCamelCase ( self : int ) -> Union[str, Any]: lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : List[Any] = None if self.use_labels: lowercase__ : int = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : List[str] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : List[str] ) -> Tuple: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase_ , layer_scale_init_value=1E-5 , ) def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> int: lowercase__ : str = SwiftFormerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Optional[Any]: lowercase__ : Tuple = self.num_labels lowercase__ : List[Any] = SwiftFormerForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowercase__ : Optional[Any] = SwiftFormerForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Optional[Any] ) -> 
Union[str, Any]: ((lowercase__) , (lowercase__) , (lowercase__)) : Optional[Any] = self.prepare_config_and_inputs() lowercase__ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : int = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () __A : List[Any] = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) __A : int = False __A : Optional[int] = False __A : Union[str, Any] = False __A : List[str] = False __A : Union[str, Any] = False def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: lowercase__ : Optional[int] = SwiftFormerModelTester(self ) lowercase__ : List[Any] = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __UpperCamelCase ( self : Dict ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def __UpperCamelCase ( self : str ) -> Optional[Any]: pass def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(lowercase_ ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def __UpperCamelCase ( self : Any ) -> int: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(lowercase_ ) lowercase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def __UpperCamelCase ( self : Dict ) -> int: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __UpperCamelCase ( self : Optional[Any] ) -> str: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Tuple = SwiftFormerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def __UpperCamelCase ( self : Dict ) -> int: pass def __UpperCamelCase ( self : int ) -> Dict: def check_hidden_states_output(lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str] ): lowercase__ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Union[str, Any] = outputs.hidden_states lowercase__ : int = 8 self.assertEqual(len(lowercase_ ) , lowercase_ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowercase_ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, 
self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> Optional[Any]: def _config_zero_init(lowercase_ : List[str] ): lowercase__ : str = copy.deepcopy(lowercase_ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowercase_ , lowercase_ , 1E-10 ) if isinstance(getattr(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ ): lowercase__ : Tuple = _config_zero_init(getattr(lowercase_ , lowercase_ ) ) setattr(lowercase_ , lowercase_ , lowercase_ ) return configs_no_init lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Any = _config_zero_init(lowercase_ ) for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(config=lowercase_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __UpperCamelCase ( self : Any ) -> Tuple: pass def lowercase_ ( ): lowercase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class snake_case_ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : List[Any] ) -> Tuple: return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def __UpperCamelCase ( self : Optional[int] ) -> int: lowercase__ : str = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowercase_ ) lowercase__ : List[Any] = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : Any = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase__ : str = model(**lowercase_ ) # verify the logits lowercase__ : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase__ : Tuple = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
333
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Iterative extended Euclidean algorithm.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
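A quick sanity check of the routine above:

# 3 * 4 = 12 ≡ 1 (mod 11), so 4 is the inverse of 3 modulo 11.
assert mod_inverse(3, 11) == 4
# gcd(7, 14) != 1, so no inverse exists and a ValueError is raised.
try:
    mod_inverse(7, 14)
except ValueError as err:
    print(err)  # mod inverse of 7 and 14 does not exist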
333
1
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
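One way to gauge the cost of the recursion above: each call draws one triangle and spawns three children until depth reaches zero, so a run at depth d draws (3 ** (d + 1) - 1) // 2 triangles in total.

# Triangle counts grow geometrically with recursion depth.
for d in range(5):
    print(d, (3 ** (d + 1) - 1) // 2)  # 0 -> 1, 1 -> 4, 2 -> 13, 3 -> 40, 4 -> 121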
333
333
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: lowercase__ : Union[str, Any] = "ZinengTang/tvlt-base" lowercase__ : List[Any] = tempfile.mkdtemp() def __UpperCamelCase ( self : int , **lowercase_ : Dict ) -> str: return TvltImageProcessor.from_pretrained(self.checkpoint , **lowercase_ ) def __UpperCamelCase ( self : Tuple , **lowercase_ : List[str] ) -> Optional[Any]: return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ ) def __UpperCamelCase ( self : Dict ) -> Any: shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : Optional[Any] ) -> str: lowercase__ : List[Any] = self.get_image_processor() lowercase__ : Union[str, Any] = self.get_feature_extractor() lowercase__ : Union[str, Any] = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ ) processor.save_pretrained(self.tmpdirname ) lowercase__ : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , lowercase_ ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: lowercase__ : Any = self.get_image_processor() lowercase__ : Any = self.get_feature_extractor() lowercase__ : int = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ ) lowercase__ : Dict = np.ones([1_20_00] ) lowercase__ : Tuple = feature_extractor(lowercase_ , return_tensors="np" ) lowercase__ : List[Any] = processor(audio=lowercase_ , return_tensors="np" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: lowercase__ : int = self.get_image_processor() lowercase__ : str = self.get_feature_extractor() lowercase__ : Tuple = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ ) lowercase__ : List[Any] = np.ones([3, 2_24, 2_24] ) lowercase__ : Optional[Any] = image_processor(lowercase_ , return_tensors="np" ) lowercase__ : Any = processor(images=lowercase_ , return_tensors="np" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: lowercase__ : Tuple = self.get_image_processor() lowercase__ : List[Any] = self.get_feature_extractor() lowercase__ : int = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ ) lowercase__ : str = np.ones([1_20_00] ) lowercase__ : Dict = np.ones([3, 2_24, 2_24] ) lowercase__ : str = processor(audio=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def __UpperCamelCase ( self : List[Any] ) -> int: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : int = self.get_feature_extractor() lowercase__ : Optional[Any] = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ ) self.assertListEqual( processor.model_input_names , 
image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
333
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
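A quick check of the function above against a known date; 2000-01-01 fell on a Saturday, which the internal datetime cross-validation also confirms:

print(zeller("01-01-2000"))  # Your date 01-01-2000, is a Saturday!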
333
1
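The style-context file in the row above implements Zeller's congruence and cross-checks itself against datetime. A minimal, self-contained sketch of that cross-check (my own toy harness, not code from the row), assuming Zeller's 0 = Saturday output convention:

# Illustrative harness: verify a Zeller-style weekday computation
# against Python's datetime module over many consecutive days.
import datetime

def zeller_weekday(m: int, d: int, y: int) -> int:
    # Returns 0..6 where 0 = Saturday, per Zeller's convention.
    if m <= 2:
        y, m = y - 1, m + 12
    c, k = divmod(y, 100)
    return (d + (13 * (m + 1)) // 5 + k + k // 4 + c // 4 + 5 * c) % 7

# Map Zeller's 0=Saturday convention onto datetime's 0=Monday convention.
zeller_to_datetime = {0: 5, 1: 6, 2: 0, 3: 1, 4: 2, 5: 3, 6: 4}

day = datetime.date(2000, 1, 1)
for _ in range(1000):
    z = zeller_weekday(day.month, day.day, day.year)
    assert zeller_to_datetime[z] == day.weekday(), day
    day += datetime.timedelta(days=1)
print("Zeller matches datetime for 1000 consecutive days")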
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class snake_case_ ( __A ): def __init__( self : Dict , lowercase_ : Any , lowercase_ : str ) -> List[str]: lowercase__ : Tuple = params lowercase__ : int = np.array(lowercase_ ) lowercase__ : Tuple = np.array([len(lowercase_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : str , lowercase_ : List[str] ) -> List[str]: return (self.token_ids[index], self.lengths[index]) def __len__( self : Tuple ) -> Optional[int]: return len(self.lengths ) def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: lowercase__ : int = self.params.max_model_input_size lowercase__ : Union[str, Any] = self.lengths > max_len logger.info(F'''Splitting {sum(lowercase_ )} too long sequences.''' ) def divide_chunks(lowercase_ : List[str] , lowercase_ : Tuple ): return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )] lowercase__ : List[str] = [] lowercase__ : str = [] if self.params.mlm: lowercase__ , lowercase__ : int = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: lowercase__ , lowercase__ : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowercase__ : Optional[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowercase__ : List[str] = np.insert(lowercase_ , 0 , lowercase_ ) if sub_s[-1] != sep_id: lowercase__ : int = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ ) assert len(lowercase_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowercase_ ) new_tok_ids.extend(lowercase_ ) new_lengths.extend([len(lowercase_ ) for l in sub_seqs] ) lowercase__ : List[str] = np.array(lowercase_ ) lowercase__ : str = np.array(lowercase_ ) def __UpperCamelCase ( self : Optional[int] ) -> str: lowercase__ : Dict = len(self ) lowercase__ : Optional[Any] = self.lengths > 11 lowercase__ : int = self.token_ids[indices] lowercase__ : Dict = self.lengths[indices] lowercase__ : Optional[Any] = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: if "unk_token" not in self.params.special_tok_ids: return else: lowercase__ : Optional[Any] = self.params.special_tok_ids["unk_token"] lowercase__ : Tuple = len(self ) lowercase__ : Any = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowercase__ : Optional[int] = (unk_occs / self.lengths) < 0.5 lowercase__ : List[str] = self.token_ids[indices] lowercase__ : Optional[Any] = self.lengths[indices] lowercase__ : Tuple = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def __UpperCamelCase ( self : str ) -> List[str]: if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} 
tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __UpperCamelCase ( self : str , lowercase_ : List[str] ) -> int: lowercase__ : Optional[int] = [t[0] for t in batch] lowercase__ : Union[str, Any] = [t[1] for t in batch] assert len(lowercase_ ) == len(lowercase_ ) # Max for paddings lowercase__ : Optional[Any] = max(lowercase_ ) # Pad token ids if self.params.mlm: lowercase__ : Tuple = self.params.special_tok_ids["pad_token"] else: lowercase__ : Optional[Any] = self.params.special_tok_ids["unk_token"] lowercase__ : int = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids] assert len(tk_ ) == len(lowercase_ ) assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ ) lowercase__ : str = torch.tensor(tk_ ) # (bs, max_seq_len_) lowercase__ : Union[str, Any] = torch.tensor(lowercase_ ) # (bs) return tk_t, lg_t
333
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
333
1
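The distillation dataset in the row above splits over-long token sequences into chunks and re-attaches the boundary tokens to every piece. A small illustrative sketch of that chunking step, using made-up special-token ids (101/102) rather than anything read from a tokenizer:

# Illustrative sketch: split a sequence into pieces of at most
# max_len tokens, re-attaching the boundary tokens to each piece.
import numpy as np

CLS_ID, SEP_ID = 101, 102  # hypothetical special-token ids
MAX_LEN = 8

def split_long_sequence(seq: np.ndarray, max_len: int = MAX_LEN) -> list:
    sub_seqs = []
    # Reserve two positions per chunk for the boundary tokens.
    for start in range(0, len(seq), max_len - 2):
        sub = seq[start : start + max_len - 2]
        if sub[0] != CLS_ID:
            sub = np.insert(sub, 0, CLS_ID)
        if sub[-1] != SEP_ID:
            sub = np.insert(sub, len(sub), SEP_ID)
        assert len(sub) <= max_len
        sub_seqs.append(sub)
    return sub_seqs

seq = np.array([CLS_ID, 5, 6, 7, 8, 9, 10, 11, 12, 13, SEP_ID])
for chunk in split_long_sequence(seq):
    print(chunk)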
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b by repeated doubling (Russian-peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, adding under the modulus at each set bit."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
333
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class snake_case_ ( __A ): __A : List[str] = "unispeech" def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) lowercase__ : List[str] = hidden_size lowercase__ : Any = feat_extract_norm lowercase__ : Optional[Any] = feat_extract_activation lowercase__ : Dict = list(lowercase_ ) lowercase__ : Union[str, Any] = list(lowercase_ ) lowercase__ : List[str] = list(lowercase_ ) lowercase__ : List[str] = conv_bias lowercase__ : Any = num_conv_pos_embeddings lowercase__ : Dict = num_conv_pos_embedding_groups lowercase__ : int = len(self.conv_dim ) lowercase__ : str = num_hidden_layers lowercase__ : Any = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : int = num_attention_heads lowercase__ : Union[str, Any] = hidden_dropout lowercase__ : Any = attention_dropout lowercase__ : Union[str, Any] = activation_dropout lowercase__ : Any = feat_proj_dropout lowercase__ : str = final_dropout lowercase__ : int = layerdrop lowercase__ : Optional[int] = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : Any = num_ctc_classes lowercase__ : int = vocab_size lowercase__ : str = do_stable_layer_norm lowercase__ : Any = use_weighted_layer_sum lowercase__ : Dict = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ : List[Any] = apply_spec_augment lowercase__ : Dict = mask_time_prob lowercase__ : Tuple = mask_time_length lowercase__ : str = mask_time_min_masks lowercase__ : List[Any] = mask_feature_prob lowercase__ : int = mask_feature_length lowercase__ : Optional[int] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__ : Optional[int] = num_codevectors_per_group lowercase__ : List[str] = num_codevector_groups lowercase__ : Dict = contrastive_logits_temperature lowercase__ : Tuple = feat_quantizer_dropout lowercase__ : Any = num_negatives lowercase__ : Dict = codevector_dim lowercase__ : Tuple = proj_codevector_dim lowercase__ : List[str] = diversity_loss_weight # ctc loss lowercase__ : Tuple = ctc_loss_reduction lowercase__ : Dict = ctc_zero_infinity # pretraining loss lowercase__ : Optional[Any] = replace_prob @property def __UpperCamelCase ( self : Dict ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
333
1
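The first file in the row above multiplies via repeated doubling (double-and-add). A quick self-contained harness, written here for illustration, checking that approach against Python's built-in * on random inputs:

# Illustrative check: double-and-add multiplication agrees with *.
import random

def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:        # add the current power-of-two multiple of a
            res += a
        a += a           # double a
        b >>= 1          # move to the next bit of b
    return res

for _ in range(1000):
    a, b = random.randrange(10**6), random.randrange(10**6)
    assert binary_multiply(a, b) == a * b
print("double-and-add agrees with * on 1000 random pairs")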
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of the world's COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
333
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by sweeping alternately right-to-left and left-to-right."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
333
1
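The sort in the row above sweeps the list alternately in both directions until a full pass makes no swap. An illustrative standalone version with a randomized check against sorted(); the function body mirrors the row's algorithm but is my own restatement:

# Illustrative check: cocktail shaker sort agrees with sorted().
import random

def cocktail_shaker_sort(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):          # right-to-left pass
            if data[j] < data[j - 1]:
                data[j], data[j - 1] = data[j - 1], data[j]
                swapped = True
        for j in range(i):                 # left-to-right pass
            if data[j] > data[j + 1]:
                data[j], data[j + 1] = data[j + 1], data[j]
                swapped = True
        if not swapped:                    # already sorted; stop early
            break
    return data

for _ in range(100):
    sample = [random.randrange(-50, 50) for _ in range(random.randrange(0, 30))]
    assert cocktail_shaker_sort(list(sample)) == sorted(sample)
print("cocktail shaker sort matches sorted() on 100 random lists")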
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] ) -> Any: lowercase__ : List[Any] = tempfile.mkdtemp() lowercase__ : Optional[Any] = SamImageProcessor() lowercase__ : Dict = SamProcessor(lowercase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : Dict , **lowercase_ : List[Any] ) -> Dict: return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def __UpperCamelCase ( self : Any ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : Tuple ) -> Dict: lowercase__ : str = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowercase__ : List[str] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : int ) -> Optional[Any]: lowercase__ : Union[str, Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) lowercase__ : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Any = SamProcessor(image_processor=lowercase_ ) lowercase__ : int = self.prepare_image_inputs() lowercase__ : Tuple = image_processor(lowercase_ , return_tensors="np" ) lowercase__ : List[str] = processor(images=lowercase_ , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Union[str, Any] = self.get_image_processor() lowercase__ : Optional[int] = SamProcessor(image_processor=lowercase_ ) lowercase__ : Union[str, Any] = [torch.ones((1, 3, 5, 5) )] lowercase__ : Optional[int] = [[17_64, 26_46]] lowercase__ : str = [[6_83, 10_24]] lowercase__ : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) lowercase__ : Optional[Any] = processor.post_process_masks( lowercase_ , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np lowercase__ : Any = [np.ones((1, 3, 5, 5) )] lowercase__ : Optional[int] = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) lowercase__ : Optional[Any] = [[1, 0], 
[0, 1]] with self.assertRaises(lowercase_ ): lowercase__ : Union[str, Any] = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) ) @require_vision @require_tf class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Dict ) -> Dict: lowercase__ : int = tempfile.mkdtemp() lowercase__ : Dict = SamImageProcessor() lowercase__ : Optional[int] = SamProcessor(lowercase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Optional[Any] ) -> str: return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def __UpperCamelCase ( self : str ) -> Dict: shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : int ) -> Any: lowercase__ : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowercase__ : int = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Optional[Any] ) -> Dict: lowercase__ : Optional[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Optional[int] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) lowercase__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __UpperCamelCase ( self : Any ) -> List[Any]: lowercase__ : List[str] = self.get_image_processor() lowercase__ : Tuple = SamProcessor(image_processor=lowercase_ ) lowercase__ : int = self.prepare_image_inputs() lowercase__ : Optional[int] = image_processor(lowercase_ , return_tensors="np" ) lowercase__ : str = processor(images=lowercase_ , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def __UpperCamelCase ( self : int ) -> Dict: lowercase__ : List[str] = self.get_image_processor() lowercase__ : str = SamProcessor(image_processor=lowercase_ ) lowercase__ : Dict = [tf.ones((1, 3, 5, 5) )] lowercase__ : Optional[Any] = [[17_64, 26_46]] lowercase__ : Any = [[6_83, 10_24]] lowercase__ : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) lowercase__ : Any = processor.post_process_masks( lowercase_ , tf.convert_to_tensor(lowercase_ ) , tf.convert_to_tensor(lowercase_ ) , return_tensors="tf" , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np lowercase__ : Tuple = [np.ones((1, 3, 5, 5) )] lowercase__ : Union[str, Any] = processor.post_process_masks( lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) lowercase__ : Tuple = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): lowercase__ : str = processor.post_process_masks( lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="tf" ) @require_vision @require_torchvision class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] 
) -> Optional[int]: lowercase__ : str = tempfile.mkdtemp() lowercase__ : Tuple = SamImageProcessor() lowercase__ : Dict = SamProcessor(lowercase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def __UpperCamelCase ( self : Any ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: lowercase__ : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowercase__ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase__ : int = self.get_image_processor() lowercase__ : Optional[int] = SamProcessor(image_processor=lowercase_ ) lowercase__ : int = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) lowercase__ : List[str] = [tf.convert_to_tensor(lowercase_ )] lowercase__ : Dict = [torch.tensor(lowercase_ )] lowercase__ : int = [[17_64, 26_46]] lowercase__ : Dict = [[6_83, 10_24]] lowercase__ : str = processor.post_process_masks( lowercase_ , lowercase_ , lowercase_ , return_tensors="tf" ) lowercase__ : Tuple = processor.post_process_masks( lowercase_ , lowercase_ , lowercase_ , return_tensors="pt" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowercase__ : List[str] = self.get_image_processor() lowercase__ : Any = SamProcessor(image_processor=lowercase_ ) lowercase__ : Optional[int] = self.prepare_image_inputs() lowercase__ : Any = image_processor(lowercase_ , return_tensors="pt" )["pixel_values"].numpy() lowercase__ : str = processor(images=lowercase_ , return_tensors="pt" )["pixel_values"].numpy() lowercase__ : Optional[int] = image_processor(lowercase_ , return_tensors="tf" )["pixel_values"].numpy() lowercase__ : Any = processor(images=lowercase_ , return_tensors="tf" )["pixel_values"].numpy() self.assertTrue(np.allclose(lowercase_ , lowercase_ ) ) self.assertTrue(np.allclose(lowercase_ , lowercase_ ) ) self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
333
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class snake_case_ ( __A ): __A : int = "token-classification" def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]: if type(lowercase_ ) == dict: lowercase__ : Dict = Namespace(**lowercase_ ) lowercase__ : str = import_module("tasks" ) try: lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type ) lowercase__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels ) lowercase__ : int = CrossEntropyLoss().ignore_index super().__init__(lowercase_ , len(self.labels ) , self.mode ) def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any: return self.model(**lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple: lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : Tuple = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : Optional[int] = self(**lowercase_ ) lowercase__ : Union[str, Any] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: lowercase__ : Tuple = self.hparams for mode in ["train", "dev", "test"]: lowercase__ : Any = self._feature_file(lowercase_ ) if os.path.exists(lowercase_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ ) lowercase__ : Dict = self.token_classification_task.convert_examples_to_features( lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("Saving features into cached file %s" , lowercase_ ) torch.save(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader: lowercase__ : str = self._feature_file(lowercase_ ) logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) lowercase__ : List[str] = 
torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str: """Compute validation""" "" lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : int = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : List[Any] = self(**lowercase_ ) lowercase__ , lowercase__ : Any = outputs[:2] lowercase__ : Optional[Any] = logits.detach().cpu().numpy() lowercase__ : int = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]: lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean() lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 ) lowercase__ : Dict = np.argmax(lowercase_ , axis=2 ) lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowercase__ : Any = dict(enumerate(self.labels ) ) lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) lowercase__ : Any = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(lowercase_ , lowercase_ ), "precision": precision_score(lowercase_ , lowercase_ ), "recall": recall_score(lowercase_ , lowercase_ ), "f1": fa_score(lowercase_ , lowercase_ ), } lowercase__ : List[Any] = dict(results.items() ) lowercase__ : List[str] = results return ret, preds_list, out_label_list def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict: # when stable lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) lowercase__ : Any = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int: # updating to test_epoch_end instead of deprecated test_end lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 lowercase__ : Optional[int] = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple: # Add NER specific options BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ ) parser.add_argument( "--task_type" , default="NER" , 
type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" ) parser.add_argument( "--max_seq_length" , default=1_28 , type=lowercase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , ) parser.add_argument( "--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd()) UpperCamelCase = parser.parse_args() UpperCamelCase = NERTransformer(args) UpperCamelCase = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) UpperCamelCase = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
333
1
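The NER module in the row above evaluates by argmaxing the logits and dropping every position whose gold label equals the CrossEntropyLoss ignore index. A toy sketch of that alignment step, with a made-up three-label map and random logits standing in for real model output:

# Illustrative sketch of the label-alignment step before computing
# seqeval-style metrics: skip padded positions, map ids to tags.
import numpy as np

PAD_TOKEN_LABEL_ID = -100  # CrossEntropyLoss().ignore_index default
label_map = {0: "O", 1: "B-PER", 2: "I-PER"}

logits = np.random.randn(2, 5, 3)            # (batch, seq_len, num_labels)
out_label_ids = np.array([[0, 1, 2, -100, -100],
                          [0, 0, 1, 2, -100]])

preds = np.argmax(logits, axis=2)
preds_list = [[] for _ in range(out_label_ids.shape[0])]
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != PAD_TOKEN_LABEL_ID:
            out_label_list[i].append(label_map[out_label_ids[i, j]])
            preds_list[i].append(label_map[preds[i, j]])
print(out_label_list)
print(preds_list)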
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class snake_case_ ( __A ): def __get__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=None ) -> Optional[Any]: # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) lowercase__ : Any = "__cached_" + self.fget.__name__ lowercase__ : Optional[int] = getattr(lowercase_ , lowercase_ , lowercase_ ) if cached is None: lowercase__ : Optional[Any] = self.fget(lowercase_ ) setattr(lowercase_ , lowercase_ , lowercase_ ) return cached def lowercase_ ( _lowerCamelCase : Union[str, Any]): lowercase__ : str = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''') def lowercase_ ( _lowerCamelCase : Optional[int]): if is_torch_fx_proxy(_lowerCamelCase): return True if is_torch_available(): import torch if isinstance(_lowerCamelCase , torch.Tensor): return True if is_tf_available(): import tensorflow as tf if isinstance(_lowerCamelCase , tf.Tensor): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer)): return True return isinstance(_lowerCamelCase , np.ndarray) def lowercase_ ( _lowerCamelCase : Optional[Any]): return isinstance(_lowerCamelCase , np.ndarray) def lowercase_ ( _lowerCamelCase : Any): return _is_numpy(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int]): import torch return isinstance(_lowerCamelCase , torch.Tensor) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return False if not is_torch_available() else _is_torch(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int]): import torch return isinstance(_lowerCamelCase , torch.device) def lowercase_ ( _lowerCamelCase : List[Any]): return False if not is_torch_available() else _is_torch_device(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[Any]): import torch if isinstance(_lowerCamelCase , _lowerCamelCase): if hasattr(_lowerCamelCase , _lowerCamelCase): lowercase__ : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase) else: return False return isinstance(_lowerCamelCase , torch.dtype) def lowercase_ ( _lowerCamelCase : int): return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): import tensorflow as tf return isinstance(_lowerCamelCase , tf.Tensor) def lowercase_ ( _lowerCamelCase : List[Any]): return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_lowerCamelCase , "is_symbolic_tensor"): return tf.is_symbolic_tensor(_lowerCamelCase) return type(_lowerCamelCase) == tf.Tensor def lowercase_ ( _lowerCamelCase : str): return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): import jax.numpy as jnp # noqa: F811 return 
isinstance(_lowerCamelCase , jnp.ndarray) def lowercase_ ( _lowerCamelCase : Any): return False if not is_flax_available() else _is_jax(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): if isinstance(_lowerCamelCase , (dict, UserDict)): return {k: to_py_obj(_lowerCamelCase) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple)): return [to_py_obj(_lowerCamelCase) for o in obj] elif is_tf_tensor(_lowerCamelCase): return obj.numpy().tolist() elif is_torch_tensor(_lowerCamelCase): return obj.detach().cpu().tolist() elif is_jax_tensor(_lowerCamelCase): return np.asarray(_lowerCamelCase).tolist() elif isinstance(_lowerCamelCase , (np.ndarray, np.number)): # tolist also works on 0d np arrays return obj.tolist() else: return obj def lowercase_ ( _lowerCamelCase : Optional[int]): if isinstance(_lowerCamelCase , (dict, UserDict)): return {k: to_numpy(_lowerCamelCase) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple)): return np.array(_lowerCamelCase) elif is_tf_tensor(_lowerCamelCase): return obj.numpy() elif is_torch_tensor(_lowerCamelCase): return obj.detach().cpu().numpy() elif is_jax_tensor(_lowerCamelCase): return np.asarray(_lowerCamelCase) else: return obj class snake_case_ ( __A ): def __UpperCamelCase ( self : Optional[Any] ) -> Dict: lowercase__ : Union[str, Any] = fields(self ) # Safety and consistency checks if not len(lowercase_ ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) lowercase__ : List[Any] = getattr(self , class_fields[0].name ) lowercase__ : int = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowercase_ ): if isinstance(lowercase_ , lowercase_ ): lowercase__ : Optional[int] = first_field.items() lowercase__ : Optional[Any] = True else: try: lowercase__ : str = iter(lowercase_ ) lowercase__ : Any = True except TypeError: lowercase__ : Optional[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(lowercase_ ): if ( not isinstance(lowercase_ , (list, tuple) ) or not len(lowercase_ ) == 2 or not isinstance(element[0] , lowercase_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowercase__ : str = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: lowercase__ : Tuple = element[1] elif first_field is not None: lowercase__ : Optional[Any] = first_field else: for field in class_fields: lowercase__ : List[Any] = getattr(self , field.name ) if v is not None: lowercase__ : List[str] = v def __delitem__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Dict ) -> Optional[int]: raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __UpperCamelCase ( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ) -> Tuple: raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> Optional[int]: raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __UpperCamelCase ( self : List[str] , *lowercase_ : str , **lowercase_ : Dict ) -> str: raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[Any] , lowercase_ : List[str] ) -> Union[str, Any]: if isinstance(lowercase_ , lowercase_ ): lowercase__ : List[str] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> List[Any]: if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowercase_ , lowercase_ ) super().__setattr__(lowercase_ , lowercase_ ) def __setitem__( self : Union[str, Any] , lowercase_ : int , lowercase_ : str ) -> int: # Will raise a KeyException if needed super().__setitem__(lowercase_ , lowercase_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : str ) -> Tuple[Any]: return tuple(self[k] for k in self.keys() ) class snake_case_ ( __A ,__A ): @classmethod def __UpperCamelCase ( cls : int , lowercase_ : Dict ) -> Optional[Any]: raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class snake_case_ ( __A ): __A : str = "longest" __A : Any = "max_length" __A : List[Any] = "do_not_pad" class snake_case_ ( __A ): __A : Dict = "pt" __A : str = "tf" __A : Optional[int] = "np" __A : List[Any] = "jax" class snake_case_ : def __init__( self : Dict , lowercase_ : List[ContextManager] ) -> Optional[Any]: lowercase__ : str = context_managers lowercase__ : List[Any] = ExitStack() def __enter__( self : Dict ) -> List[Any]: for context_manager in self.context_managers: self.stack.enter_context(lowercase_ ) def __exit__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Optional[Any]: self.stack.__exit__(*lowercase_ , **lowercase_ ) def lowercase_ ( _lowerCamelCase : Any): lowercase__ : Optional[Any] = infer_framework(_lowerCamelCase) if framework == "tf": lowercase__ : List[str] = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": lowercase__ : List[str] = inspect.signature(model_class.forward) # PyTorch models else: lowercase__ : List[Any] = inspect.signature(model_class.__call__) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def lowercase_ ( _lowerCamelCase : Tuple): 
lowercase__ : Union[str, Any] = model_class.__name__ lowercase__ : Union[str, Any] = infer_framework(_lowerCamelCase) if framework == "tf": lowercase__ : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": lowercase__ : Optional[int] = inspect.signature(model_class.forward) # PyTorch models else: lowercase__ : List[str] = inspect.signature(model_class.__call__) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def lowercase_ ( _lowerCamelCase : MutableMapping , _lowerCamelCase : str = "" , _lowerCamelCase : str = "."): def _flatten_dict(_lowerCamelCase : Tuple , _lowerCamelCase : Any="" , _lowerCamelCase : int="."): for k, v in d.items(): lowercase__ : Optional[Any] = str(_lowerCamelCase) + delimiter + str(_lowerCamelCase) if parent_key else k if v and isinstance(_lowerCamelCase , _lowerCamelCase): yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase).items() else: yield key, v return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)) @contextmanager def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : bool = False): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[Any]=None): if is_numpy_array(_lowerCamelCase): return np.transpose(_lowerCamelCase , axes=_lowerCamelCase) elif is_torch_tensor(_lowerCamelCase): return array.T if axes is None else array.permute(*_lowerCamelCase) elif is_tf_tensor(_lowerCamelCase): import tensorflow as tf return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase) elif is_jax_tensor(_lowerCamelCase): return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase) else: raise ValueError(f'''Type not supported for transpose: {type(_lowerCamelCase)}.''') def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple): if is_numpy_array(_lowerCamelCase): return np.reshape(_lowerCamelCase , _lowerCamelCase) elif is_torch_tensor(_lowerCamelCase): return array.reshape(*_lowerCamelCase) elif is_tf_tensor(_lowerCamelCase): import tensorflow as tf return tf.reshape(_lowerCamelCase , _lowerCamelCase) elif is_jax_tensor(_lowerCamelCase): return jnp.reshape(_lowerCamelCase , _lowerCamelCase) else: raise ValueError(f'''Type not supported for reshape: {type(_lowerCamelCase)}.''') def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=None): if is_numpy_array(_lowerCamelCase): return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase) elif is_torch_tensor(_lowerCamelCase): return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase) elif is_tf_tensor(_lowerCamelCase): import tensorflow as tf return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase) elif is_jax_tensor(_lowerCamelCase): return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase) else: raise ValueError(f'''Type not supported for squeeze: {type(_lowerCamelCase)}.''') def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]): if is_numpy_array(_lowerCamelCase): return np.expand_dims(_lowerCamelCase , _lowerCamelCase) elif is_torch_tensor(_lowerCamelCase): return array.unsqueeze(dim=_lowerCamelCase) elif is_tf_tensor(_lowerCamelCase): import tensorflow as tf return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase) elif is_jax_tensor(_lowerCamelCase): return 
jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase) else: raise ValueError(f'''Type not supported for expand_dims: {type(_lowerCamelCase)}.''') def lowercase_ ( _lowerCamelCase : int): if is_numpy_array(_lowerCamelCase): return np.size(_lowerCamelCase) elif is_torch_tensor(_lowerCamelCase): return array.numel() elif is_tf_tensor(_lowerCamelCase): import tensorflow as tf return tf.size(_lowerCamelCase) elif is_jax_tensor(_lowerCamelCase): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(_lowerCamelCase)}.''') def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : int): for key, value in auto_map.items(): if isinstance(_lowerCamelCase , (tuple, list)): lowercase__ : Optional[Any] = [f'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: lowercase__ : Any = f'''{repo_id}--{value}''' return auto_map def lowercase_ ( _lowerCamelCase : Union[str, Any]): for base_class in inspect.getmro(_lowerCamelCase): lowercase__ : List[str] = base_class.__module__ lowercase__ : Any = base_class.__name__ if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch") or name == "PreTrainedModel": return "pt" elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''')
333
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { '''configuration_mask2former''': [ '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Mask2FormerConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Mask2FormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Mask2FormerForUniversalSegmentation''', '''Mask2FormerModel''', '''Mask2FormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
333
1
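The utility file in the row above opens with a caching property descriptor. A minimal sketch of the same pattern; the class and attribute names here are mine, only the stash-the-result-under-a-private-attribute mechanism is taken from the row:

# Minimal sketch: compute a property once, cache it on the instance.
class cached_property_sketch(property):
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)   # later reads hit this attribute
        return cached

class Expensive:
    @cached_property_sketch
    def value(self):
        print("computing once")
        return 42

e = Expensive()
print(e.value)  # prints "computing once" then 42
print(e.value)  # prints 42 only; the cached attribute is reused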
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
333
# Logistic Regression from scratch

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the probability from the fitted logistic regression
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
333
1
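The style-context file in the row above fits logistic regression by batch gradient descent. A toy end-to-end run of the same update rule on a synthetic one-feature problem; the data, seed, and learning rate are arbitrary choices for illustration:

# Illustrative run of the batch gradient-descent update for log loss.
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(-2, 1, (50, 1)), rng.normal(2, 1, (50, 1))])
x = np.hstack([x, np.ones((100, 1))])          # add a bias column
y = np.concatenate([np.zeros(50), np.ones(50)])

theta = np.zeros(x.shape[1])
for _ in range(5000):
    h = sigmoid(x @ theta)
    theta -= 0.1 * (x.T @ (h - y)) / y.size    # gradient of the log loss

accuracy = ((sigmoid(x @ theta) > 0.5) == y).mean()
print(f"weights: {theta}, training accuracy: {accuracy:.2f}")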
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class snake_case_ ( __A ): __A : str = "imagegpt" __A : Union[str, Any] = ["past_key_values"] __A : Optional[Any] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[Any] , lowercase_ : Union[str, Any]=5_12 + 1 , lowercase_ : Optional[int]=32 * 32 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=24 , lowercase_ : Any=8 , lowercase_ : List[str]=None , lowercase_ : Optional[Any]="quick_gelu" , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Optional[int]=1E-5 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Tuple=False , lowercase_ : Any=False , **lowercase_ : Dict , ) -> Any: lowercase__ : Optional[Any] = vocab_size lowercase__ : Optional[Any] = n_positions lowercase__ : Tuple = n_embd lowercase__ : List[str] = n_layer lowercase__ : str = n_head lowercase__ : Optional[int] = n_inner lowercase__ : Any = activation_function lowercase__ : Optional[Any] = resid_pdrop lowercase__ : int = embd_pdrop lowercase__ : Tuple = attn_pdrop lowercase__ : Any = layer_norm_epsilon lowercase__ : List[Any] = initializer_range lowercase__ : Any = scale_attn_weights lowercase__ : Any = use_cache lowercase__ : Tuple = scale_attn_by_inverse_layer_idx lowercase__ : Union[str, Any] = reorder_and_upcast_attn lowercase__ : int = tie_word_embeddings super().__init__(tie_word_embeddings=lowercase_ , **lowercase_ ) class snake_case_ ( __A ): @property def __UpperCamelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : "FeatureExtractionMixin" , lowercase_ : int = 1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 3 , lowercase_ : int = 32 , lowercase_ : int = 32 , ) -> Mapping[str, Any]: lowercase__ : Tuple = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : Tuple = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) ) return inputs
333
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__A ) class snake_case_ ( __A ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} ) __A : ClassVar[Features] = Features({"text": Value("string" )} ) __A : ClassVar[Features] = Features({"labels": ClassLabel} ) __A : str = "text" __A : str = "labels" def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int: if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) lowercase__ : Optional[int] = copy.deepcopy(self ) lowercase__ : Tuple = self.label_schema.copy() lowercase__ : Union[str, Any] = features[self.label_column] lowercase__ : int = label_schema return task_template @property def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
333
1
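The task template in the row above is a frozen dataclass that "updates" itself by deep-copying. A minimal sketch of that frozen-copy pattern with toy fields; the real template swaps in a ClassLabel feature, while here a plain label count stands in:

# Minimal sketch: mutate a frozen dataclass by copying it and
# writing through object.__setattr__.
import copy
from dataclasses import dataclass, field

@dataclass(frozen=True)
class TextClassificationSketch:
    task: str = field(default="text-classification")
    text_column: str = "text"
    label_column: str = "labels"
    num_labels: int = 0

    def align_with_features(self, features: dict) -> "TextClassificationSketch":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        template = copy.deepcopy(self)
        object.__setattr__(template, "num_labels", len(features[self.label_column]))
        return template

template = TextClassificationSketch()
aligned = template.align_with_features({"text": str, "labels": ["neg", "pos"]})
print(aligned.num_labels)  # 2; the original instance is untouched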
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class snake_case_ ( __A ): __A : BigBirdConfig __A : jnp.dtype = jnp.floataa __A : bool = True def __UpperCamelCase ( self : Optional[Any] ) -> int: super().setup() lowercase__ : int = nn.Dense(5 , dtype=self.dtype ) def __call__( self : Any , *lowercase_ : int , **lowercase_ : Any ) -> List[str]: lowercase__ : List[Any] = super().__call__(*lowercase_ , **lowercase_ ) lowercase__ : Tuple = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class snake_case_ ( __A ): __A : List[Any] = FlaxBigBirdForNaturalQuestionsModule def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int): def cross_entropy(_lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=None): lowercase__ : str = logits.shape[-1] lowercase__ : Optional[int] = (labels[..., None] == jnp.arange(_lowerCamelCase)[None]).astype("f4") lowercase__ : str = jax.nn.log_softmax(_lowerCamelCase , axis=-1) lowercase__ : Tuple = -jnp.sum(labels * logits , axis=-1) if reduction is not None: lowercase__ : Tuple = reduction(_lowerCamelCase) return loss lowercase__ : Optional[int] = partial(_lowerCamelCase , reduction=jnp.mean) lowercase__ : int = cross_entropy(_lowerCamelCase , _lowerCamelCase) lowercase__ : Any = cross_entropy(_lowerCamelCase , _lowerCamelCase) lowercase__ : int = cross_entropy(_lowerCamelCase , _lowerCamelCase) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class snake_case_ : __A : str = "google/bigbird-roberta-base" __A : int = 3000 __A : int = 1_0500 __A : int = 128 __A : int = 3 __A : int = 1 __A : int = 5 # tx_args __A : float = 3e-5 __A : float = 0.0 __A : int = 2_0000 __A : float = 0.0095 __A : str = "bigbird-roberta-natural-questions" __A : str = "training-expt" __A : str = "data/nq-training.jsonl" __A : str = "data/nq-validation.jsonl" def __UpperCamelCase ( self : Tuple ) -> Dict: os.makedirs(self.base_dir , exist_ok=lowercase_ ) lowercase__ : List[str] = os.path.join(self.base_dir , self.save_dir ) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class snake_case_ : __A : int __A : int = 4096 # no dynamic padding on TPUs def __call__( self : List[str] , lowercase_ : Any ) -> str: lowercase__ : Dict = self.collate_fn(lowercase_ ) lowercase__ : Union[str, Any] = jax.tree_util.tree_map(lowercase_ , lowercase_ ) return batch def __UpperCamelCase ( self : List[str] , lowercase_ : Optional[Any] ) -> List[str]: lowercase__ , lowercase__ : Tuple = self.fetch_inputs(features["input_ids"] ) lowercase__ : List[str] = { "input_ids": jnp.array(lowercase_ , dtype=jnp.intaa ), "attention_mask": jnp.array(lowercase_ , dtype=jnp.intaa ), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ), "pooled_labels": jnp.array(features["category"] , 
dtype=jnp.intaa ), } return batch def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : list ) -> Dict: lowercase__ : Union[str, Any] = [self._fetch_inputs(lowercase_ ) for ids in input_ids] return zip(*lowercase_ ) def __UpperCamelCase ( self : List[Any] , lowercase_ : list ) -> Any: lowercase__ : Tuple = [1 for _ in range(len(lowercase_ ) )] while len(lowercase_ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple=None): if seed is not None: lowercase__ : List[Any] = dataset.shuffle(seed=_lowerCamelCase) for i in range(len(_lowerCamelCase) // batch_size): lowercase__ : Any = dataset[i * batch_size : (i + 1) * batch_size] yield dict(_lowerCamelCase) @partial(jax.pmap , axis_name="batch") def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , **_lowerCamelCase : Dict): def loss_fn(_lowerCamelCase : Optional[int]): lowercase__ : List[str] = model_inputs.pop("start_labels") lowercase__ : List[str] = model_inputs.pop("end_labels") lowercase__ : Any = model_inputs.pop("pooled_labels") lowercase__ : List[Any] = state.apply_fn(**_lowerCamelCase , params=_lowerCamelCase , dropout_rng=_lowerCamelCase , train=_lowerCamelCase) lowercase__ , lowercase__ , lowercase__ : Dict = outputs return state.loss_fn( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) lowercase__ , lowercase__ : List[Any] = jax.random.split(_lowerCamelCase) lowercase__ : Optional[int] = jax.value_and_grad(_lowerCamelCase) lowercase__ , lowercase__ : Dict = grad_fn(state.params) lowercase__ : Any = jax.lax.pmean({"loss": loss} , axis_name="batch") lowercase__ : Dict = jax.lax.pmean(_lowerCamelCase , "batch") lowercase__ : Tuple = state.apply_gradients(grads=_lowerCamelCase) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch") def lowercase_ ( _lowerCamelCase : Any , **_lowerCamelCase : Optional[Any]): lowercase__ : int = model_inputs.pop("start_labels") lowercase__ : Any = model_inputs.pop("end_labels") lowercase__ : int = model_inputs.pop("pooled_labels") lowercase__ : str = state.apply_fn(**_lowerCamelCase , params=state.params , train=_lowerCamelCase) lowercase__ , lowercase__ , lowercase__ : int = outputs lowercase__ : Optional[Any] = state.loss_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) lowercase__ : int = jax.lax.pmean({"loss": loss} , axis_name="batch") return metrics class snake_case_ ( train_state.TrainState ): __A : Callable = struct.field(pytree_node=__A ) @dataclass class snake_case_ : __A : Args __A : Callable __A : Callable __A : Callable __A : Callable __A : wandb __A : Callable = None def __UpperCamelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int]=None ) -> int: lowercase__ : Tuple = model.params lowercase__ : Union[str, Any] = TrainState.create( apply_fn=model.__call__ , params=lowercase_ , tx=lowercase_ , loss_fn=lowercase_ , ) if ckpt_dir is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = restore_checkpoint(lowercase_ , lowercase_ ) lowercase__ : Optional[int] = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowercase__ , lowercase__ : Dict = build_tx(**lowercase_ ) 
lowercase__ : Dict = train_state.TrainState( step=lowercase_ , apply_fn=model.__call__ , params=lowercase_ , tx=lowercase_ , opt_state=lowercase_ , ) lowercase__ : Any = args lowercase__ : Any = data_collator lowercase__ : List[Any] = lr lowercase__ : Optional[Any] = params lowercase__ : Optional[Any] = jax_utils.replicate(lowercase_ ) return state def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] ) -> Union[str, Any]: lowercase__ : str = self.args lowercase__ : str = len(lowercase_ ) // args.batch_size lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 ) lowercase__ : List[Any] = jax.random.split(lowercase_ , jax.device_count() ) for epoch in range(args.max_epochs ): lowercase__ : Any = jnp.array(0 , dtype=jnp.floataa ) lowercase__ : Union[str, Any] = get_batched_dataset(lowercase_ , args.batch_size , seed=lowercase_ ) lowercase__ : List[Any] = 0 for batch in tqdm(lowercase_ , total=lowercase_ , desc=F'''Running EPOCH-{epoch}''' ): lowercase__ : str = self.data_collator(lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.train_step_fn(lowercase_ , lowercase_ , **lowercase_ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 if i % args.logging_steps == 0: lowercase__ : Optional[int] = jax_utils.unreplicate(state.step ) lowercase__ : str = running_loss.item() / i lowercase__ : Optional[int] = self.scheduler_fn(state_step - 1 ) lowercase__ : List[Any] = self.evaluate(lowercase_ , lowercase_ ) lowercase__ : Dict = { "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(lowercase_ ) ) self.logger.log(lowercase_ , commit=lowercase_ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=lowercase_ ) def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] ) -> Optional[int]: lowercase__ : Optional[int] = get_batched_dataset(lowercase_ , self.args.batch_size ) lowercase__ : Optional[Any] = len(lowercase_ ) // self.args.batch_size lowercase__ : Optional[Any] = jnp.array(0 , dtype=jnp.floataa ) lowercase__ : Dict = 0 for batch in tqdm(lowercase_ , total=lowercase_ , desc="Evaluating ... " ): lowercase__ : Optional[int] = self.data_collator(lowercase_ ) lowercase__ : List[str] = self.val_step_fn(lowercase_ , **lowercase_ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 return running_loss / i def __UpperCamelCase ( self : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] ) -> Dict: lowercase__ : Dict = jax_utils.unreplicate(lowercase_ ) print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " ) self.model_save_fn(lowercase_ , params=state.params ) with open(os.path.join(lowercase_ , "opt_state.msgpack" ) , "wb" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(lowercase_ , "args.joblib" ) ) joblib.dump(self.data_collator , os.path.join(lowercase_ , "data_collator.joblib" ) ) with open(os.path.join(lowercase_ , "training_state.json" ) , "w" ) as f: json.dump({"step": state.step.item()} , lowercase_ ) print("DONE" ) def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : int): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=" ... 
") with open(os.path.join(_lowerCamelCase , "flax_model.msgpack") , "rb") as f: lowercase__ : Union[str, Any] = from_bytes(state.params , f.read()) with open(os.path.join(_lowerCamelCase , "opt_state.msgpack") , "rb") as f: lowercase__ : List[Any] = from_bytes(state.opt_state , f.read()) lowercase__ : Dict = joblib.load(os.path.join(_lowerCamelCase , "args.joblib")) lowercase__ : List[Any] = joblib.load(os.path.join(_lowerCamelCase , "data_collator.joblib")) with open(os.path.join(_lowerCamelCase , "training_state.json") , "r") as f: lowercase__ : Optional[Any] = json.load(_lowerCamelCase) lowercase__ : Dict = training_state["step"] print("DONE") return params, opt_state, step, args, data_collator def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]): lowercase__ : str = num_train_steps - warmup_steps lowercase__ : List[Any] = optax.linear_schedule(init_value=_lowerCamelCase , end_value=_lowerCamelCase , transition_steps=_lowerCamelCase) lowercase__ : str = optax.linear_schedule(init_value=_lowerCamelCase , end_value=1E-7 , transition_steps=_lowerCamelCase) lowercase__ : Union[str, Any] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps]) return lr def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : str): def weight_decay_mask(_lowerCamelCase : Any): lowercase__ : Optional[int] = traverse_util.flatten_dict(_lowerCamelCase) lowercase__ : str = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(_lowerCamelCase) lowercase__ : List[Any] = scheduler_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) lowercase__ : Dict = optax.adamw(learning_rate=_lowerCamelCase , weight_decay=_lowerCamelCase , mask=_lowerCamelCase) return tx, lr
333
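# Minimal, self-contained sketch of the one-hot cross-entropy used in the
# training script above; the shapes and values here are toy assumptions, not
# the script's real inputs.
import jax
import jax.numpy as jnp


def cross_entropy(logits, labels, reduction=jnp.mean):
    # one-hot encode the integer labels, then take -sum(one_hot * log_softmax)
    vocab_size = logits.shape[-1]
    one_hot = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    return reduction(-jnp.sum(one_hot * log_probs, axis=-1))


print(cross_entropy(jnp.array([[2.0, 0.5, -1.0]]), jnp.array([0])))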
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid values: min_val must not be greater than max_val")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
333
1
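# The guessing loop above is a plain binary search, so the number of midpoint
# probes grows roughly like log2(higher - lower). A quick standalone check
# with assumed bounds:
import math

lower, higher, to_guess = 0, 1000, 777
probes = 0
while True:
    probes += 1
    mid = (lower + higher) // 2
    if mid < to_guess:
        lower = mid
    elif mid > to_guess:
        higher = mid
    else:
        break
print(probes, "probes; log2(1000) ~", math.ceil(math.log2(1000)))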
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
333
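# Sketch of why the guards above parse versions instead of comparing strings:
# `packaging.version` compares semantically, while raw strings compare
# character by character.
from packaging import version

print(version.parse("3.10") < version.parse("3.7"))  # False (correct)
print("3.10" < "3.7")                                # True (lexicographic trap)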
import os import re import shutil import sys import tempfile import unittest import black UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. UpperCamelCase = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : str ) -> List[str]: lowercase__ : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) ) lowercase__ : List[Any] = self.diffusers_dir shutil.copy( os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , ) def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : Dict = "src/diffusers" shutil.rmtree(self.diffusers_dir ) def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple: lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ ) lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" ) with open(lowercase_ , "w" , newline="\n" ) as f: f.write(lowercase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowercase_ ) with open(lowercase_ , "r" ) as f: self.assertTrue(f.read() , lowercase_ ) def __UpperCamelCase ( self : str ) -> Optional[int]: lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" ) self.assertEqual(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : int ) -> str: # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , ) # Copy consistency with a really long name lowercase__ : Optional[int] = 
"TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
333
1
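# Sketch of the normalization step the test above relies on: `black.format_str`
# reformats a source string under an explicit Mode. The target version here is
# an assumption for the example, not the one the test pins.
import black

mode = black.Mode(target_versions={black.TargetVersion.PY38}, line_length=119)
print(black.format_str("x  ={'a': 1,'b':2}", mode=mode))  # x = {"a": 1, "b": 2}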
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w\+|wb\+|ab|ab\+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
333
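# The encoding-check regex above, exercised in isolation on toy strings:
import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w\+|wb\+|ab|ab\+)\b)(?<=\s)(open)\((.*)\)")
print(bool(pattern.search('with open("data.txt") as f:')))                    # True: flagged
print(bool(pattern.search('with open("data.txt", encoding="utf-8") as f:')))  # False: ok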
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple): for param, grad_param in zip(model_a.parameters() , model_b.parameters()): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True): model.train() lowercase__ : Tuple = model(_lowerCamelCase) lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device)) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False): set_seed(42) lowercase__ : Dict = RegressionModel() lowercase__ : int = deepcopy(_lowerCamelCase) lowercase__ : str = RegressionDataset(length=80) lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16) model.to(accelerator.device) if sched: lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3) lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3) lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) # Make a copy of `model` if sched: lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowercase_ ( _lowerCamelCase : Tuple): # Test when on a single CPU or GPU that the context manager does nothing lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): 
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Any): # Test on distributed setup that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False): lowercase__ : int = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : str = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase 
, _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))] GradientState._reset_state() def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False): lowercase__ : Dict = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : Any = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)): if split_batches: sched.step() else: for _ in range(accelerator.num_processes): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase)) if accelerator.num_processes > 1: check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) GradientState._reset_state() def lowercase_ ( ): lowercase__ : List[str] = Accelerator() lowercase__ : List[Any] = RegressionDataset(length=80) lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ : int = RegressionDataset(length=96) lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if iteration < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, 
_ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if batch_num < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowercase_ ( ): lowercase__ : str = Accelerator() lowercase__ : Dict = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**") test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**") test_noop_sync(_lowerCamelCase) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**") test_distributed_sync(_lowerCamelCase) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
333
1
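# Why the tests above scale each micro-batch loss by the number of
# accumulation steps: summing k scaled micro-batch gradients reproduces the
# full-batch gradient. Toy single-parameter check, independent of Accelerate.
import torch

w = torch.tensor(1.0, requires_grad=True)
data = [torch.tensor(2.0), torch.tensor(4.0)]
k = len(data)
for x in data:
    loss = (w * x) ** 2 / k  # scale each micro-batch loss by 1/k
    loss.backward()          # gradients accumulate into w.grad
full_loss = sum((w * x) ** 2 for x in data) / k
print(torch.allclose(w.grad, torch.autograd.grad(full_loss, w)[0]))  # True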
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
333
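# Sketch: the script above saves only `model.state_dict()`, so loading it back
# requires rebuilding the architecture first. Toy module, in-memory buffer.
import io

import torch
from torch import nn

model = nn.Linear(4, 2)
buffer = io.BytesIO()
torch.save(model.state_dict(), buffer)  # same call the script makes with a path
buffer.seek(0)

restored = nn.Linear(4, 2)  # the architecture must exist before load_state_dict
restored.load_state_dict(torch.load(buffer))
print(torch.equal(model.weight, restored.weight))  # True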
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str): lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase) lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase) lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase) lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": lowercase__ : Any = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": lowercase__ : int = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Dict = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global].") # Encoder for layer_index in range(config.num_layers): lowercase__ : str = f'''layers_{str(_lowerCamelCase)}''' # Self-Attention lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"] lowercase__ : Any = tax_attention_key lowercase__ : Any = tax_attention_out lowercase__ : Any = tax_attention_query lowercase__ : List[str] = tax_attention_value lowercase__ : List[str] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Any = tax_global_layer_norm if split_mlp_wi: lowercase__ : Tuple = tax_mlp_wi_a lowercase__ : str = tax_mlp_wi_a else: lowercase__ : List[Any] = tax_mlp_wi lowercase__ : str = tax_mlp_wo lowercase__ : int = tax_mlp_layer_norm lowercase__ : List[str] = flax_model_encoder_layer_block # Only for layer 0: lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T lowercase__ : Optional[int] = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T 
lowercase__ : str = tax_encoder_global_rel_embedding # Assigning lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"] lowercase__ : Union[str, Any] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers): lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}''' # Self-Attention lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"] lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"] lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"] lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"] # Layer Normalization lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"] lowercase__ : Any = tax_attention_key lowercase__ : List[Any] = tax_attention_out lowercase__ : Any = tax_attention_query lowercase__ : List[Any] = tax_attention_value lowercase__ : List[str] = tax_pre_attention_layer_norm lowercase__ : List[Any] = tax_enc_dec_attention_key lowercase__ : Optional[Any] = tax_enc_dec_attention_out lowercase__ : str = tax_enc_dec_attention_query lowercase__ : Union[str, Any] = tax_enc_dec_attention_value lowercase__ : Tuple = tax_cross_layer_norm if split_mlp_wi: lowercase__ : List[str] = tax_mlp_wi_a lowercase__ : List[Any] = tax_mlp_wi_a else: lowercase__ : Tuple = tax_mlp_wi lowercase__ : Any = tax_mlp_wo lowercase__ : Tuple = txa_mlp_layer_norm lowercase__ : int = flax_model_decoder_layer_block # Decoder Normalization lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"] lowercase__ : List[Any] = txa_decoder_norm # Only for layer 0: lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T lowercase__ : str = tax_decoder_rel_embedding # Token Embeddings lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"] lowercase__ : Optional[Any] = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(_lowerCamelCase) print("T5X Model was sucessfully converted!") if __name__ == "__main__": 
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
333
1
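# The kernel-by-kernel assignments above are dict surgery on a nested Flax
# parameter tree; `flax.traverse_util` expresses the same idea compactly.
# Toy tree below, not the real T5X layout.
from flax import traverse_util

params = {"encoder": {"block": {"0": {"attention": {"key": {"kernel": 0}}}}}}
flat = traverse_util.flatten_dict(params)
flat[("encoder", "block", "0", "attention", "key", "kernel")] = 1  # overwrite a leaf
params = traverse_util.unflatten_dict(flat)
print(params["encoder"]["block"]["0"]["attention"]["key"]["kernel"])  # 1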
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
333
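# What `v.half()` in the converter above does to each tensor: same values at
# reduced precision, half the storage per element.
import torch

t = torch.randn(3, dtype=torch.float32)
print(t.half().dtype)                                   # torch.float16
print(t.element_size(), "->", t.half().element_size())  # 4 -> 2 bytes per element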
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
333
1
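# Sketch of the `attribute_map` mechanism the config above uses: reads of
# `max_position_embeddings` are redirected to `context_length`. The ToyConfig
# class is made up for illustration.
from transformers import PretrainedConfig


class ToyConfig(PretrainedConfig):
    model_type = "toy"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, context_length=1024, **kwargs):
        self.context_length = context_length
        super().__init__(**kwargs)


print(ToyConfig().max_position_embeddings)  # 1024, resolved through the alias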
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
333
class snake_case_ : def __init__( self : int ) -> Optional[int]: lowercase__ : Optional[int] = 0 lowercase__ : List[str] = 0 lowercase__ : Any = {} def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]: if vertex not in self.adjacency: lowercase__ : List[Any] = {} self.num_vertices += 1 def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]: self.add_vertex(lowercase_ ) self.add_vertex(lowercase_ ) if head == tail: return lowercase__ : int = weight lowercase__ : Any = weight def __UpperCamelCase ( self : Dict ) -> Optional[int]: lowercase__ : List[Any] = self.get_edges() for edge in edges: lowercase__ , lowercase__ , lowercase__ : int = edge edges.remove((tail, head, weight) ) for i in range(len(lowercase_ ) ): lowercase__ : Tuple = list(edges[i] ) edges.sort(key=lambda lowercase_ : e[2] ) for i in range(len(lowercase_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowercase__ : int = edges[i][2] + 1 for edge in edges: lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge lowercase__ : Union[str, Any] = weight lowercase__ : Dict = weight def __str__( self : str ) -> Any: lowercase__ : str = "" for tail in self.adjacency: for head in self.adjacency[tail]: lowercase__ : Optional[Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Any = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __UpperCamelCase ( self : List[str] ) -> Dict: return self.adjacency.keys() @staticmethod def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]: lowercase__ : Any = Graph() if vertices is None: lowercase__ : str = [] if edges is None: lowercase__ : List[Any] = [] for vertex in vertices: g.add_vertex(lowercase_ ) for edge in edges: g.add_edge(*lowercase_ ) return g class snake_case_ : def __init__( self : int ) -> List[str]: lowercase__ : Dict = {} lowercase__ : Tuple = {} def __len__( self : Union[str, Any] ) -> Union[str, Any]: return len(self.parent ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple: if item in self.parent: return self.find(lowercase_ ) lowercase__ : Union[str, Any] = item lowercase__ : int = 0 return item def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any: if item not in self.parent: return self.make_set(lowercase_ ) if item != self.parent[item]: lowercase__ : Union[str, Any] = self.find(self.parent[item] ) return self.parent[item] def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]: lowercase__ : Dict = self.find(lowercase_ ) lowercase__ : Optional[int] = self.find(lowercase_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowercase__ : Dict = roota return roota if self.rank[roota] < self.rank[roota]: lowercase__ : int = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowercase__ : Tuple = roota return roota return None @staticmethod def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]: lowercase__ : List[Any] = graph.num_vertices lowercase__ : Optional[Any] = Graph.UnionFind() lowercase__ : int = [] while num_components > 1: lowercase__ : List[Any] = {} for vertex in graph.get_vertices(): lowercase__ : Any = -1 lowercase__ : List[str] = graph.get_edges() for edge in edges: lowercase__ 
, lowercase__ , lowercase__ : str = edge edges.remove((tail, head, weight) ) for edge in edges: lowercase__ , lowercase__ , lowercase__ : List[str] = edge lowercase__ : List[str] = union_find.find(lowercase_ ) lowercase__ : Union[str, Any] = union_find.find(lowercase_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : Dict = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex] if union_find.find(lowercase_ ) != union_find.find(lowercase_ ): union_find.union(lowercase_ , lowercase_ ) mst_edges.append(cheap_edge[vertex] ) lowercase__ : Optional[Any] = num_components - 1 lowercase__ : List[Any] = Graph.build(edges=lowercase_ ) return mst
333
1
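# The union-by-rank logic above, condensed into a small standalone union-find
# (with path compression) so the rank comparisons are easier to follow.
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def find(self, item):
        self.parent.setdefault(item, item)
        self.rank.setdefault(item, 0)
        if self.parent[item] != item:
            self.parent[item] = self.find(self.parent[item])  # path compression
        return self.parent[item]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a  # attach the smaller-rank tree under the larger
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
        return root_a


uf = UnionFind()
uf.union(1, 2)
uf.union(2, 3)
print(uf.find(1) == uf.find(3))  # True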
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') UpperCamelCase = logging.getLogger(__name__) @dataclass class snake_case_ : __A : Optional[int] = field( default=128 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) __A : bool = field( default=__A ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) __A : bool = field( default=__A ,metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } ,) __A : Optional[int] = field( default=__A ,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } ,) __A : Optional[int] = field( default=__A ,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } ,) __A : Optional[int] = field( default=__A ,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } ,) @dataclass class snake_case_ : __A : str = field( default=__A ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __A : str = field( default=__A ,metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Train language if it is different from the evaluation language."} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __A : Optional[str] = field( default=__A ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) __A : Optional[bool] = field( default=__A ,metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} ,) __A : bool = field( default=__A ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,) __A : str = field( default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,) __A : bool = field( default=__A ,metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } ,) __A : bool = field( default=__A ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,) def lowercase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_xnli" , _lowerCamelCase) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase__ : Any = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase) datasets.utils.logging.set_verbosity(_lowerCamelCase) transformers.utils.logging.set_verbosity(_lowerCamelCase) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''') logger.info(f'''Training/evaluation parameters {training_args}''') # Detecting last checkpoint. lowercase__ : int = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome.") elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.") # Set seed before initializing model. set_seed(training_args.seed) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. 
if training_args.do_train: if model_args.train_language is None: lowercase__ : List[Any] = load_dataset( "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: lowercase__ : Optional[int] = load_dataset( "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ : Dict = train_dataset.features["label"].names if training_args.do_eval: lowercase__ : Union[str, Any] = load_dataset( "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ : str = eval_dataset.features["label"].names if training_args.do_predict: lowercase__ : Any = load_dataset( "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ : List[str] = predict_dataset.features["label"].names # Labels lowercase__ : Optional[int] = len(_lowerCamelCase) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , idalabel={str(_lowerCamelCase): label for i, label in enumerate(_lowerCamelCase)} , labelaid={label: i for i, label in enumerate(_lowerCamelCase)} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: lowercase__ : Dict = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase__ : Union[str, Any] = False def preprocess_function(_lowerCamelCase : Any): # Tokenize the texts return tokenizer( examples["premise"] , examples["hypothesis"] , padding=_lowerCamelCase , max_length=data_args.max_seq_length , truncation=_lowerCamelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: lowercase__ : Optional[Any] = min(len(_lowerCamelCase) , data_args.max_train_samples) lowercase__ : str = train_dataset.select(range(_lowerCamelCase)) with training_args.main_process_first(desc="train dataset map pre-processing"): lowercase__ : int = train_dataset.map( _lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , ) # Log a few random samples from the training set: for index in 
random.sample(range(len(_lowerCamelCase)) , 3): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''') if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase__ : List[str] = min(len(_lowerCamelCase) , data_args.max_eval_samples) lowercase__ : Optional[Any] = eval_dataset.select(range(_lowerCamelCase)) with training_args.main_process_first(desc="validation dataset map pre-processing"): lowercase__ : List[str] = eval_dataset.map( _lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: lowercase__ : Optional[int] = min(len(_lowerCamelCase) , data_args.max_predict_samples) lowercase__ : List[str] = predict_dataset.select(range(_lowerCamelCase)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): lowercase__ : Tuple = predict_dataset.map( _lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , ) # Get the metric function lowercase__ : Optional[int] = evaluate.load("xnli") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_lowerCamelCase : EvalPrediction): lowercase__ : int = p.predictions[0] if isinstance(p.predictions , _lowerCamelCase) else p.predictions lowercase__ : Optional[int] = np.argmax(_lowerCamelCase , axis=1) return metric.compute(predictions=_lowerCamelCase , references=p.label_ids) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: lowercase__ : Optional[Any] = default_data_collator elif training_args.fpaa: lowercase__ : Union[str, Any] = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8) else: lowercase__ : Any = None # Initialize our Trainer lowercase__ : Optional[int] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , ) # Training if training_args.do_train: lowercase__ : Tuple = None if training_args.resume_from_checkpoint is not None: lowercase__ : str = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ : List[Any] = last_checkpoint lowercase__ : int = trainer.train(resume_from_checkpoint=_lowerCamelCase) lowercase__ : Optional[Any] = train_result.metrics lowercase__ : Dict = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase) ) lowercase__ : Optional[int] = min(_lowerCamelCase , len(_lowerCamelCase)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , _lowerCamelCase) trainer.save_metrics("train" , _lowerCamelCase) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") lowercase__ : List[Any] = trainer.evaluate(eval_dataset=_lowerCamelCase) lowercase__ : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase) lowercase__ : int = min(_lowerCamelCase , len(_lowerCamelCase)) trainer.log_metrics("eval" , _lowerCamelCase) trainer.save_metrics("eval" , _lowerCamelCase) # Prediction if training_args.do_predict: logger.info("*** Predict ***") lowercase__ , lowercase__ , lowercase__ : int = trainer.predict(_lowerCamelCase , metric_key_prefix="predict") lowercase__ : int = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCamelCase) ) lowercase__ : Optional[Any] = min(_lowerCamelCase , len(_lowerCamelCase)) trainer.log_metrics("predict" , _lowerCamelCase) trainer.save_metrics("predict" , _lowerCamelCase) lowercase__ : str = np.argmax(_lowerCamelCase , axis=1) lowercase__ : List[Any] = os.path.join(training_args.output_dir , "predictions.txt") if trainer.is_world_process_zero(): with open(_lowerCamelCase , "w") as writer: writer.write("index\tprediction\n") for index, item in enumerate(_lowerCamelCase): lowercase__ : List[Any] = label_list[item] writer.write(f'''{index}\t{item}\n''') if __name__ == "__main__": main()
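A minimal sketch of driving the entry point above from Python for a quick smoke test. The flags map onto fields the script actually reads (model_name_or_path, language) and onto stock TrainingArguments options; the checkpoint name and output directory are placeholders.

import sys

# Placeholder arguments for a small German XNLI debug run.
sys.argv = [
    "run_xnli.py",
    "--model_name_or_path", "bert-base-multilingual-cased",  # placeholder checkpoint
    "--language", "de",
    "--do_train",
    "--do_eval",
    "--output_dir", "/tmp/xnli_debug",  # placeholder path
    "--overwrite_output_dir",
]
main()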
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : str): lowercase__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: lowercase__ : str = 1024 lowercase__ : List[str] = 4096 lowercase__ : List[Any] = 24 lowercase__ : Dict = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : Any = [256, 512, 1024, 1024] lowercase__ : Optional[int] = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Union[str, Any] = True lowercase__ : Tuple = 150 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : str = "ade20k-id2label.json" lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder") if "pretrained.model" in name: lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings") if "patch_embed" in name: lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings") if "pos_embed" in name: lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings") if "attn.proj" in name: lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense") if "proj" in name and "project" not in name: lowercase__ : int = name.replace("proj" , "projection") if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layer") if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if "norm1" in name: lowercase__ : List[str] = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "layernorm_after") if "scratch.output_conv" in name: lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head") if "scratch" in name: lowercase__ : str = name.replace("scratch" , "neck") if "layer1_rn" in name: lowercase__ : int = name.replace("layer1_rn" , "convs.0") if "layer2_rn" in name: lowercase__ : int = name.replace("layer2_rn" , "convs.1") if "layer3_rn" in name: lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2") if "layer4_rn" in name: lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3") if "refinenet" in name: lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''') if "out_conv" in 
name: lowercase__ : str = name.replace("out_conv" , "projection") if "resConfUnit1" in name: lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1") if "resConfUnit2" in name: lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2") if "conv1" in name: lowercase__ : List[Any] = name.replace("conv1" , "convolution1") if "conv2" in name: lowercase__ : Tuple = name.replace("conv2" , "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: lowercase__ : Any = name.replace("pretrained" , "dpt") if "bn" in name: lowercase__ : str = name.replace("bn" , "batch_norm") if "head" in name: lowercase__ : Optional[Any] = name.replace("head" , "head.head") if "encoder.norm" in name: lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm") if "auxlayer" in name: lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head") return name def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''') lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :] lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size] lowercase__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ 
-config.hidden_size :, : ] lowercase__ : int = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict): lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase) # load original state_dict from URL lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu") # remove certain keys remove_ignore_keys_(_lowerCamelCase) # rename keys for key in state_dict.copy().keys(): lowercase__ : List[str] = state_dict.pop(_lowerCamelCase) lowercase__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase) model.load_state_dict(_lowerCamelCase) model.eval() # Check outputs on an image lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384 lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase) lowercase__ : List[str] = prepare_img() lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt") # forward pass lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth # Assert logits lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(_lowerCamelCase) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase) ) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
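A minimal usage sketch for the conversion routine above, mirroring the positional call at the bottom of the script (checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name); the dump folder is a placeholder.

# Convert the default DPT-Large MiDaS checkpoint without pushing to the Hub.
convert_dpt_checkpoint(
    "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
    "/tmp/dpt-large",  # placeholder output directory
    False,             # push_to_hub
    "dpt-large",
)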
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''google/bigbird-roberta-base''': 4096, '''google/bigbird-roberta-large''': 4096, '''google/bigbird-base-trivia-itc''': 4096, } UpperCamelCase = '''▁''' class snake_case_ ( __A ): __A : Optional[Any] = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = BigBirdTokenizer __A : Tuple = ["input_ids", "attention_mask"] __A : List[int] = [] def __init__( self : Dict , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Any="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : int="<pad>" , lowercase_ : Optional[Any]="[SEP]" , lowercase_ : int="[MASK]" , lowercase_ : Dict="[CLS]" , **lowercase_ : List[str] , ) -> str: lowercase__ : Optional[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token lowercase__ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token lowercase__ : Union[str, Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token lowercase__ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token lowercase__ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token super().__init__( lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , ) lowercase__ : List[Any] = vocab_file lowercase__ : Dict = False if not self.vocab_file else True def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]: lowercase__ : Any = [self.sep_token_id] lowercase__ : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCamelCase ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] def __UpperCamelCase ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]: lowercase__ : str = [self.sep_token_id] lowercase__ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowercase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ : str = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
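A short usage sketch for the fast tokenizer above. The import name BigBirdTokenizerFast is the public upstream name and is an assumption here; the checkpoint id comes from the pretrained map in this file.

from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
encoding = tokenizer("BigBird handles long sequences.")
print(encoding.input_ids)  # framed as [CLS] ... [SEP] by the special-token builder above
tokenizer.save_pretrained("/tmp/bigbird-tokenizer")  # placeholder path; copies spiece.model on save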
def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
    lowercase__ : Union[str, Any] = 1
    lowercase__ : int = 0
    for divide_by_number in range(_lowerCamelCase , digit + 1):
        lowercase__ : list[int] = []
        lowercase__ : Dict = numerator
        for _ in range(1 , digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(_lowerCamelCase):
                    lowercase__ : Union[str, Any] = len(_lowerCamelCase)
                    lowercase__ : Optional[int] = divide_by_number
            else:
                has_been_divided.append(_lowerCamelCase)
                lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
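Because the identifiers above are obfuscated, here is a hedged, readable sketch of the same remainder-cycle search (the intent appears to be Project Euler problem 26: find the denominator up to `digit` whose unit fraction has the longest run of remainders before one repeats). All names are descriptive stand-ins; the early `break` is an added shortcut that does not change the result.

def longest_recurring_cycle(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []  # remainders seen so far for this denominator
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
                break
            has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit


assert longest_recurring_cycle(1, 10) == 7  # 1/7 = 0.(142857), the longest cycle below 10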
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class snake_case_ ( __A ): __A : torch.FloatTensor class snake_case_ ( nn.Module ): def __init__( self : Any , lowercase_ : str=3 , lowercase_ : List[str]=3 , lowercase_ : List[Any]=("DownEncoderBlock2D",) , lowercase_ : Optional[int]=(64,) , lowercase_ : int=2 , lowercase_ : Optional[Any]=32 , lowercase_ : str="silu" , lowercase_ : Tuple=True , ) -> Optional[int]: super().__init__() lowercase__ : Tuple = layers_per_block lowercase__ : Any = torch.nn.Convad( lowercase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Union[str, Any] = None lowercase__ : Any = nn.ModuleList([] ) # down lowercase__ : Dict = block_out_channels[0] for i, down_block_type in enumerate(lowercase_ ): lowercase__ : List[str] = output_channel lowercase__ : List[Any] = block_out_channels[i] lowercase__ : Optional[int] = i == len(lowercase_ ) - 1 lowercase__ : Optional[int] = get_down_block( lowercase_ , num_layers=self.layers_per_block , in_channels=lowercase_ , out_channels=lowercase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , ) self.down_blocks.append(lowercase_ ) # mid lowercase__ : Optional[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , ) # out lowercase__ : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase_ , eps=1E-6 ) lowercase__ : str = nn.SiLU() lowercase__ : List[Any] = 2 * out_channels if double_z else out_channels lowercase__ : Optional[int] = nn.Convad(block_out_channels[-1] , lowercase_ , 3 , padding=1 ) lowercase__ : Tuple = False def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] ) -> str: lowercase__ : int = x lowercase__ : Dict = self.conv_in(lowercase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowercase_ : Tuple ): def custom_forward(*lowercase_ : List[str] ): return module(*lowercase_ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: lowercase__ : Tuple = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase_ ) , lowercase_ , use_reentrant=lowercase_ ) # middle lowercase__ : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , use_reentrant=lowercase_ ) else: for down_block in self.down_blocks: lowercase__ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ ) # middle lowercase__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase_ ) else: # down for down_block in self.down_blocks: lowercase__ : Optional[Any] = down_block(lowercase_ ) # middle lowercase__ : List[str] = self.mid_block(lowercase_ ) # post-process lowercase__ : Any = self.conv_norm_out(lowercase_ ) lowercase__ : int = self.conv_act(lowercase_ ) lowercase__ : Tuple = self.conv_out(lowercase_ ) return sample class snake_case_ ( nn.Module ): def __init__( self : int , 
lowercase_ : Dict=3 , lowercase_ : Any=3 , lowercase_ : Tuple=("UpDecoderBlock2D",) , lowercase_ : List[str]=(64,) , lowercase_ : Optional[Any]=2 , lowercase_ : int=32 , lowercase_ : Optional[Any]="silu" , lowercase_ : Union[str, Any]="group" , ) -> Tuple: super().__init__() lowercase__ : Optional[Any] = layers_per_block lowercase__ : List[Any] = nn.Convad( lowercase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Dict = None lowercase__ : str = nn.ModuleList([] ) lowercase__ : Dict = in_channels if norm_type == "spatial" else None # mid lowercase__ : str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , ) # up lowercase__ : Any = list(reversed(lowercase_ ) ) lowercase__ : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowercase_ ): lowercase__ : int = output_channel lowercase__ : Tuple = reversed_block_out_channels[i] lowercase__ : Union[str, Any] = i == len(lowercase_ ) - 1 lowercase__ : List[str] = get_up_block( lowercase_ , num_layers=self.layers_per_block + 1 , in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , resnet_time_scale_shift=lowercase_ , ) self.up_blocks.append(lowercase_ ) lowercase__ : Any = output_channel # out if norm_type == "spatial": lowercase__ : List[str] = SpatialNorm(block_out_channels[0] , lowercase_ ) else: lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase_ , eps=1E-6 ) lowercase__ : Any = nn.SiLU() lowercase__ : Any = nn.Convad(block_out_channels[0] , lowercase_ , 3 , padding=1 ) lowercase__ : List[str] = False def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any]=None ) -> List[Any]: lowercase__ : Optional[Any] = z lowercase__ : Union[str, Any] = self.conv_in(lowercase_ ) lowercase__ : str = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowercase_ : List[str] ): def custom_forward(*lowercase_ : Union[str, Any] ): return module(*lowercase_ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ ) lowercase__ : int = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowercase__ : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ ) else: # middle lowercase__ : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ ) lowercase__ : Any = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ ) else: # middle lowercase__ : str = self.mid_block(lowercase_ , lowercase_ ) lowercase__ : Dict = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowercase__ : Union[str, Any] = up_block(lowercase_ , lowercase_ ) # post-process if latent_embeds is None: lowercase__ : Any = 
self.conv_norm_out(lowercase_ ) else: lowercase__ : List[Any] = self.conv_norm_out(lowercase_ , lowercase_ ) lowercase__ : Optional[Any] = self.conv_act(lowercase_ ) lowercase__ : Tuple = self.conv_out(lowercase_ ) return sample class snake_case_ ( nn.Module ): def __init__( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str]=None , lowercase_ : str="random" , lowercase_ : Tuple=False , lowercase_ : Dict=True ) -> str: super().__init__() lowercase__ : int = n_e lowercase__ : List[Any] = vq_embed_dim lowercase__ : Optional[Any] = beta lowercase__ : Dict = legacy lowercase__ : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowercase__ : List[Any] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) lowercase__ : Optional[Any] = self.used.shape[0] lowercase__ : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowercase__ : Optional[Any] = self.re_embed lowercase__ : Optional[Any] = self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' F'''Using {self.unknown_index} for unknown indices.''' ) else: lowercase__ : List[str] = n_e lowercase__ : Tuple = sane_index_shape def __UpperCamelCase ( self : List[str] , lowercase_ : int ) -> List[str]: lowercase__ : Tuple = inds.shape assert len(lowercase_ ) > 1 lowercase__ : Optional[Any] = inds.reshape(ishape[0] , -1 ) lowercase__ : Optional[Any] = self.used.to(lowercase_ ) lowercase__ : List[str] = (inds[:, :, None] == used[None, None, ...]).long() lowercase__ : Optional[Any] = match.argmax(-1 ) lowercase__ : Optional[Any] = match.sum(2 ) < 1 if self.unknown_index == "random": lowercase__ : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowercase__ : Dict = self.unknown_index return new.reshape(lowercase_ ) def __UpperCamelCase ( self : Any , lowercase_ : int ) -> Union[str, Any]: lowercase__ : Optional[Any] = inds.shape assert len(lowercase_ ) > 1 lowercase__ : List[Any] = inds.reshape(ishape[0] , -1 ) lowercase__ : Optional[int] = self.used.to(lowercase_ ) if self.re_embed > self.used.shape[0]: # extra token lowercase__ : List[str] = 0 # simply set to zero lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase_ ) return back.reshape(lowercase_ ) def __UpperCamelCase ( self : Any , lowercase_ : List[str] ) -> int: # reshape z -> (batch, height, width, channel) and flatten lowercase__ : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowercase__ : Tuple = torch.argmin(torch.cdist(lowercase_ , self.embedding.weight ) , dim=1 ) lowercase__ : List[str] = self.embedding(lowercase_ ).view(z.shape ) lowercase__ : Optional[Any] = None lowercase__ : Optional[int] = None # compute loss for embedding if not self.legacy: lowercase__ : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowercase__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowercase__ : Dict = z + (z_q - z).detach() # reshape back to match original input shape lowercase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowercase__ : Union[str, 
Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowercase__ : Union[str, Any] = self.remap_to_used(lowercase_ ) lowercase__ : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowercase__ : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ) -> List[str]: # shape specifying (batch, height, width, channel) if self.remap is not None: lowercase__ : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis lowercase__ : Tuple = self.unmap_to_all(lowercase_ ) lowercase__ : List[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowercase__ : Optional[int] = self.embedding(lowercase_ ) if shape is not None: lowercase__ : Any = z_q.view(lowercase_ ) # reshape back to match original input shape lowercase__ : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class snake_case_ ( __A ): def __init__( self : List[str] , lowercase_ : Tuple , lowercase_ : Union[str, Any]=False ) -> int: lowercase__ : Any = parameters lowercase__ , lowercase__ : Tuple = torch.chunk(lowercase_ , 2 , dim=1 ) lowercase__ : str = torch.clamp(self.logvar , -30.0 , 20.0 ) lowercase__ : str = deterministic lowercase__ : Any = torch.exp(0.5 * self.logvar ) lowercase__ : List[Any] = torch.exp(self.logvar ) if self.deterministic: lowercase__ : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype lowercase__ : Tuple = randn_tensor( self.mean.shape , generator=lowercase_ , device=self.parameters.device , dtype=self.parameters.dtype ) lowercase__ : Union[str, Any] = self.mean + self.std * sample return x def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any]=None ) -> List[Any]: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def __UpperCamelCase ( self : Dict , lowercase_ : int , lowercase_ : int=[1, 2, 3] ) -> List[str]: if self.deterministic: return torch.Tensor([0.0] ) lowercase__ : List[str] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> Tuple: return self.mean
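A hedged sketch of the diagonal Gaussian posterior that closes the module. The readable names DiagonalGaussianDistribution, sample and kl are the upstream diffusers names and are assumptions for the obfuscated definitions above.

import torch

# Channels are split in half by torch.chunk(..., 2, dim=1): mean | logvar.
params = torch.randn(1, 8, 4, 4)
posterior = DiagonalGaussianDistribution(params)
latent = posterior.sample()  # reparameterized draw: mean + std * noise
kl_term = posterior.kl()     # KL against a standard normal, reduced over (C, H, W)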
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ): __A : int = StableUnCLIPPipeline __A : int = TEXT_TO_IMAGE_PARAMS __A : Any = TEXT_TO_IMAGE_BATCH_PARAMS __A : int = TEXT_TO_IMAGE_IMAGE_PARAMS __A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __A : int = False def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : str = 32 lowercase__ : Any = embedder_hidden_size # prior components torch.manual_seed(0 ) lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : List[str] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : Any = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , ) torch.manual_seed(0 ) lowercase__ : Union[str, Any] = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ ) lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : str = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0 ) lowercase__ : Any = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL() lowercase__ : List[Any] = { # prior components "prior_tokenizer": 
prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any: if str(lowercase_ ).startswith("mps" ): lowercase__ : Any = torch.manual_seed(lowercase_ ) else: lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : Union[str, Any] = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: lowercase__ : str = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowercase_ ) @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Tuple ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : int ) -> int: lowercase__ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" ) lowercase__ : Optional[int] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) lowercase__ : int = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : str = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) lowercase__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=__A )
class snake_case_ ( __A ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    __A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
    __A : ClassVar[Features] = Features({"text": Value("string" )} )
    __A : ClassVar[Features] = Features({"labels": ClassLabel} )
    __A : str = "text"
    __A : str = "labels"

    def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , lowercase_ ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        lowercase__ : Optional[int] = copy.deepcopy(self )
        lowercase__ : Tuple = self.label_schema.copy()
        lowercase__ : Union[str, Any] = features[self.label_column]
        lowercase__ : int = label_schema
        return task_template

    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
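A hedged usage sketch for the template above. The public names TextClassification and align_with_features are the upstream datasets names and are assumptions for the obfuscated definitions.

from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification(text_column="text", label_column="labels")
aligned = template.align_with_features(features)  # swaps the dataset's ClassLabel into the label schema
print(aligned.label_schema["labels"].names)       # ['neg', 'pos']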
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False): try: lowercase__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowercase_ ( _lowerCamelCase : int): return unittest.skip("Test was skipped")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
List[Any]=None , _lowerCamelCase : Dict=None): if test_case is None: return partial(_lowerCamelCase , version=_lowerCamelCase) return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase) class snake_case_ ( unittest.TestCase ): __A : int = True @classmethod def __UpperCamelCase ( cls : str ) -> str: lowercase__ : str = tempfile.mkdtemp() @classmethod def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCamelCase ( self : str ) -> Optional[int]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str: lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = AcceleratorState() lowercase__ : Optional[int] = tensor[None].clone().to(state.device) lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu() lowercase__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _lowerCamelCase): return False return True class snake_case_ : def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]: lowercase__ : int = returncode lowercase__ : Dict = stdout lowercase__ : List[Any] = stderr async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str): while True: lowercase__ : int = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Tuple = [] lowercase__ : List[Any] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True): lowercase__ : Optional[Any] = asyncio.get_event_loop() lowercase__ : List[Any] = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : str = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Dict = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False): try: lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_lowerCamelCase , "decode"): lowercase__ : Optional[Any] = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
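A hedged usage sketch for the synchronous subprocess helper that closes the module. The name run_command and its return_stdout keyword follow the upstream accelerate signature and are assumptions for the obfuscated definition above.

# Returns captured stdout on success; raises SubprocessCallException with the
# combined output when the command exits non-zero.
output = run_command(["python", "-c", "print('ok')"], return_stdout=True)
assert output.strip() == "ok"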
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging UpperCamelCase = logging.get_logger(__name__) class snake_case_ ( __A ): __A : Any = "linear" __A : Any = "cosine" __A : Dict = "cosine_with_restarts" __A : Any = "polynomial" __A : Optional[Any] = "constant" __A : Any = "constant_with_warmup" __A : Any = "piecewise_constant" def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1): return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase: 1 , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1.0 , _lowerCamelCase)) return 1.0 return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1): lowercase__ : Dict = {} lowercase__ : Union[str, Any] = step_rules.split(",") for rule_str in rule_list[:-1]: lowercase__ , lowercase__ : List[str] = rule_str.split(":") lowercase__ : int = int(_lowerCamelCase) lowercase__ : List[str] = float(_lowerCamelCase) lowercase__ : Dict = value lowercase__ : int = float(rule_list[-1]) def create_rules_function(_lowerCamelCase : Optional[int] , _lowerCamelCase : Any): def rule_func(_lowerCamelCase : int) -> float: lowercase__ : Optional[int] = sorted(rules_dict.keys()) for i, sorted_step in enumerate(_lowerCamelCase): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowercase__ : Tuple = create_rules_function(_lowerCamelCase , _lowerCamelCase) return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]=-1): def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) return max( 0.0 , float(num_training_steps - current_step) / float(max(1 , num_training_steps - num_warmup_steps))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : str): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) lowercase__ : Optional[Any] = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase) * 2.0 * progress))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1): def lr_lambda(_lowerCamelCase : List[Any]): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) lowercase__ : Tuple = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase) * progress) % 1.0)))) return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Union[str, Any]=1.0 , _lowerCamelCase : List[Any]=-1): lowercase__ : Any = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''') def lr_lambda(_lowerCamelCase : int): if current_step < num_warmup_steps: return float(_lowerCamelCase) / float(max(1 , _lowerCamelCase)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowercase__ : List[str] = lr_init - lr_end lowercase__ : Optional[Any] = num_training_steps - num_warmup_steps lowercase__ : Tuple = 1 - (current_step - num_warmup_steps) / decay_steps lowercase__ : List[str] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCamelCase = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowercase_ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ): lowercase__ : Optional[int] = SchedulerType(_lowerCamelCase) lowercase__ : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''') if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''') if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , ) return schedule_func( _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase)
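A hedged usage sketch for the dispatcher that closes the module. The public name get_scheduler and its keyword signature follow the upstream diffusers API and are assumptions for the obfuscated definition above.

import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    SchedulerType.LINEAR,  # or the string "linear"
    optimizer=optimizer,
    num_warmup_steps=10,
    num_training_steps=100,
)
for _ in range(100):
    optimizer.step()
    lr_scheduler.step()  # one scheduler step per optimizer step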
333
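# A minimal usage sketch for the schedulers above, assuming torch is installed and the
# functions are importable from their module; names and numbers below are illustrative.
import torch

_param = torch.nn.Parameter(torch.zeros(1))
_opt = torch.optim.SGD([_param], lr=0.1)
_sched = get_linear_schedule_with_warmup(_opt, num_warmup_steps=2, num_training_steps=10)
_lrs = []
for _ in range(4):
    _opt.step()
    _lrs.append(round(_opt.param_groups[0]["lr"], 4))
    _sched.step()
# warmup multiplies the base lr by 0/2 then 1/2, then the linear decay takes over
assert _lrs == [0.0, 0.05, 0.1, 0.0875]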
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : int = ["flax"] def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Dict = ["flax"] def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Optional[Any] = ["flax"] def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : 
Dict = ["flax"] def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[Any] = ["flax"] def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : Optional[int] = ["flax"] def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : int = ["flax"] def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]: requires_backends(cls , ["flax"] ) class snake_case_ ( metaclass=__A ): __A : List[str] = ["flax"] def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple: requires_backends(self , ["flax"] ) @classmethod def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]: requires_backends(cls , ["flax"] ) @classmethod def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]: requires_backends(cls , ["flax"] )
333
1
import os


def solution(filename: str = "input.txt") -> int:
    # Minimal path sum moving right, up or down, from any cell in the left column
    # to any cell in the right column (Project Euler 82 style).
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [[int(element) for element in line.split(",")] for line in input_file.readlines()]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            # step right from the previous column
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            # relax downward moves
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            # relax upward moves
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
333
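# A small in-memory sketch of the same three-pass relaxation that solution() above runs
# on input.txt; the function name here is hypothetical.
def min_path_sum_in_memory(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    col = [row[0] for row in matrix]                        # costs for column 0
    for j in range(1, cols):
        col = [col[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):                            # then relax downward
            col[i] = min(col[i], col[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                   # and upward
            col[i] = min(col[i], col[i + 1] + matrix[i][j])
    return min(col)

# 201 -> 96 -> 342 is the cheapest left-to-right path in this 3x3 instance:
assert min_path_sum_in_memory([[131, 673, 234], [201, 96, 342], [630, 803, 746]]) == 639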
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class snake_case_ ( __A ): __A : List[str] = "vit_mae" def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]: super().__init__(**lowercase_ ) lowercase__ : List[str] = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : List[Any] = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : Optional[Any] = attention_probs_dropout_prob lowercase__ : Any = initializer_range lowercase__ : Optional[Any] = layer_norm_eps lowercase__ : Optional[Any] = image_size lowercase__ : Optional[int] = patch_size lowercase__ : Any = num_channels lowercase__ : str = qkv_bias lowercase__ : Optional[Any] = decoder_num_attention_heads lowercase__ : Any = decoder_hidden_size lowercase__ : Any = decoder_num_hidden_layers lowercase__ : Union[str, Any] = decoder_intermediate_size lowercase__ : int = mask_ratio lowercase__ : Tuple = norm_pix_loss
333
1
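# Quick arithmetic on the ViT MAE defaults above: a 224x224 image with 16x16 patches
# yields (224 // 16) ** 2 = 196 patch tokens, and mask_ratio = 0.75 means MAE
# pretraining masks int(196 * 0.75) = 147 of them.
num_patches = (224 // 16) ** 2
assert num_patches == 196 and int(num_patches * 0.75) == 147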
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    # Neville's iterated interpolation: q[j][i] holds the value at x0 of the degree
    # (i - 1) polynomial through points j - i + 1 .. j; the answer is q[n - 1][n - 1].
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
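# A hand-checked call of neville_interpolate above. Note the implementation anchors its
# base values in column 1 and starts the recurrence at i = 2, so the first sample point
# is never used; a padding point (0, 0) is supplied. Interpolating y = x**2 through
# (1, 1), (2, 4), (4, 16) at x0 = 3 then recovers 9.0 exactly.
assert neville_interpolate([0, 1, 2, 4], [0, 1, 4, 16], 3)[0] == 9.0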
def gcd(a: int, b: int) -> int:
    # Euclid's algorithm
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    # Extended Euclid: the invariants u1*a + u2*m = u3 and v1*a + v2*m = v3 hold
    # throughout, so when v3 reaches 0, u3 = gcd(a, m) and u1 is a's inverse mod m.
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
333
1
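# Cross-checks for the functions above against Python's built-ins:
assert gcd(12, 18) == 6
assert find_mod_inverse(3, 7) == pow(3, -1, 7) == 5  # pow(a, -1, m) needs Python 3.8+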
from ...configuration_utils import PretrainedConfig UpperCamelCase = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class snake_case_ ( __A ): __A : Any = "tapas" def __init__( self : List[str] , lowercase_ : Union[str, Any]=3_05_22 , lowercase_ : List[Any]=7_68 , lowercase_ : Optional[Any]=12 , lowercase_ : int=12 , lowercase_ : Dict=30_72 , lowercase_ : str="gelu" , lowercase_ : str=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , lowercase_ : List[str]=0.02 , lowercase_ : List[Any]=1E-12 , lowercase_ : str=0 , lowercase_ : Optional[int]=10.0 , lowercase_ : int=0 , lowercase_ : str=1.0 , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=1.0 , lowercase_ : List[Any]=False , lowercase_ : Optional[Any]=None , lowercase_ : Any=1.0 , lowercase_ : Optional[int]=1.0 , lowercase_ : Dict=False , lowercase_ : Any=False , lowercase_ : Dict="ratio" , lowercase_ : List[str]=None , lowercase_ : List[str]=None , lowercase_ : Dict=64 , lowercase_ : List[Any]=32 , lowercase_ : Optional[Any]=False , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : Optional[Any]=False , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=False , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Any , ) -> Optional[int]: super().__init__(pad_token_id=lowercase_ , **lowercase_ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Any = vocab_size lowercase__ : Dict = hidden_size lowercase__ : Optional[int] = num_hidden_layers lowercase__ : str = num_attention_heads lowercase__ : int = hidden_act lowercase__ : Optional[int] = intermediate_size lowercase__ : List[str] = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : Union[str, Any] = type_vocab_sizes lowercase__ : str = initializer_range lowercase__ : Optional[Any] = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : List[str] = positive_label_weight lowercase__ : Union[str, Any] = num_aggregation_labels lowercase__ : Optional[Any] = aggregation_loss_weight lowercase__ : List[str] = use_answer_as_supervision lowercase__ : str = answer_loss_importance lowercase__ : List[str] = use_normalized_answer_loss lowercase__ : Tuple = huber_loss_delta lowercase__ : List[Any] = temperature lowercase__ : Optional[Any] = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : Tuple = use_gumbel_for_aggregation lowercase__ : int = average_approximation_function lowercase__ : Optional[int] = cell_selection_preference lowercase__ : List[Any] = answer_loss_cutoff lowercase__ : str = max_num_rows lowercase__ : List[Any] = max_num_columns lowercase__ : List[Any] = average_logits_per_cell lowercase__ : List[str] = select_one_column lowercase__ : Any = allow_empty_column_selection lowercase__ : Union[str, Any] = init_cell_selection_weights_to_zero lowercase__ : Union[str, Any] = 
reset_position_index_per_cell lowercase__ : str = disable_per_token_loss # Aggregation hyperparameters lowercase__ : List[Any] = aggregation_labels lowercase__ : Union[str, Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , lowercase_ ): lowercase__ : int = {int(lowercase_ ): v for k, v in aggregation_labels.items()}
333
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "): lowercase__ : Union[str, Any] = text.split(_lowerCamelCase) return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)] def lowercase_ ( _lowerCamelCase : dict): lowercase__ , lowercase__ : List[str] = [], [] for title, text in zip(documents["title"] , documents["text"]): if text is not None: for passage in split_text(_lowerCamelCase): titles.append(title if title is not None else "") texts.append(_lowerCamelCase) return {"title": titles, "text": texts} def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast): lowercase__ : Union[str, Any] = ctx_tokenizer( documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"] lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ): ###################################### logger.info("Step 1 - Create the dataset") ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase__ : str = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"]) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc) # And compute the embeddings lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase) lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) lowercase__ : List[Any] = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space lowercase__ : List[Any] = dataset.map( partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , ) # And finally save your dataset lowercase__ : Optional[int] = 
os.path.join(rag_example_args.output_dir , "my_knowledge_dataset") dataset.save_to_disk(_lowerCamelCase) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset") ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase) # And save the index lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss") dataset.get_index("embeddings").save(_lowerCamelCase) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class snake_case_ : __A : str = field( default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,) __A : Optional[str] = field( default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,) __A : str = field( default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,) __A : str = field( default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } ,) __A : Optional[str] = field( default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,) @dataclass class snake_case_ : __A : Optional[int] = field( default=__A ,metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } ,) __A : int = field( default=16 ,metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } ,) @dataclass class snake_case_ : __A : int = field( default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,) __A : int = field( default=128 ,metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } ,) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
333
1
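# A standalone sketch of the word-window chunking that split_text above performs
# before embedding; the function name here is hypothetical.
def split_words(text: str, n: int = 100, character: str = " ") -> list[str]:
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

assert split_words("a b c d e", n=2) == ["a b", "c d", "e"]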
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: a deque stands in for the priority queue, valid because every
        # edge weight is 0 or 1.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # 0-weight edges extend the current distance layer: front of the deque
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
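# Usage sketch for the 0-1 BFS above (class and method names restored as assumed):
# a 0-weight edge costs nothing to traverse, so 0 -> 1 -> 2 has total weight 1.
graph = AdjacencyList(3)
graph.add_edge(0, 1, 1)
graph.add_edge(1, 2, 0)
assert graph.get_shortest_path(0, 2) == 1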
import argparse
import datetime


def zeller(date_input: str) -> str:
    # Zeller's congruence: compute the day of the week for a mm-dd-yyyy date.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        # January and February count as months 13 and 14 of the previous year
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    zeller(args.date_input)
333
1
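# A hand-worked run of zeller above: 01-01-2024 was a Monday. January counts as month
# 13 of 2023, so c = 20, k = 23, t = int(2.6 * 13 - 5.39) = 28, u = 5, v = 5,
# x = 1 + 23 = 24, z = 62, w = 62 - 40 = 22, f = 22 % 7 = 1 -> "Monday".
assert zeller("01-01-2024") == "Your date 01-01-2024, is a Monday!"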
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class snake_case_ ( __A ): __A : Tuple = "segformer" def __init__( self : Tuple , lowercase_ : List[str]=3 , lowercase_ : int=4 , lowercase_ : List[str]=[2, 2, 2, 2] , lowercase_ : Union[str, Any]=[8, 4, 2, 1] , lowercase_ : Optional[Any]=[32, 64, 1_60, 2_56] , lowercase_ : List[str]=[7, 3, 3, 3] , lowercase_ : str=[4, 2, 2, 2] , lowercase_ : Dict=[1, 2, 5, 8] , lowercase_ : Optional[Any]=[4, 4, 4, 4] , lowercase_ : Tuple="gelu" , lowercase_ : str=0.0 , lowercase_ : Dict=0.0 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=0.02 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=1E-6 , lowercase_ : Any=2_56 , lowercase_ : Union[str, Any]=2_55 , **lowercase_ : Any , ) -> str: super().__init__(**lowercase_ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be" " removed, as the behaviour will default to that of reshape_last_stage = True." , lowercase_ , ) lowercase__ : Any = num_channels lowercase__ : Any = num_encoder_blocks lowercase__ : Any = depths lowercase__ : str = sr_ratios lowercase__ : Any = hidden_sizes lowercase__ : List[Any] = patch_sizes lowercase__ : Any = strides lowercase__ : List[str] = mlp_ratios lowercase__ : List[Any] = num_attention_heads lowercase__ : Optional[int] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Union[str, Any] = classifier_dropout_prob lowercase__ : List[Any] = initializer_range lowercase__ : List[Any] = drop_path_rate lowercase__ : Union[str, Any] = layer_norm_eps lowercase__ : Optional[int] = decoder_hidden_size lowercase__ : int = kwargs.get("reshape_last_stage" , lowercase_ ) lowercase__ : str = semantic_loss_ignore_index class snake_case_ ( __A ): __A : List[str] = version.parse("1.11" ) @property def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __UpperCamelCase ( self : str ) -> float: return 1E-4 @property def __UpperCamelCase ( self : Dict ) -> int: return 12
333
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node UpperCamelCase = 4 UpperCamelCase = 3 class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str]): for shard in shards: for i in range(_lowerCamelCase): yield {"i": i, "shard": shard} def lowercase_ ( ): lowercase__ : List[str] = int(os.environ["RANK"]) lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"]) lowercase__ : Union[str, Any] = ArgumentParser() parser.add_argument("--streaming" , type=_lowerCamelCase) parser.add_argument("--local_rank" , type=_lowerCamelCase) parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0) lowercase__ : int = parser.parse_args() lowercase__ : Union[str, Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]} lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase) if not streaming: lowercase__ : str = Dataset.from_list(list(_lowerCamelCase)) lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase) lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase) lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : Any = full_size // world_size expected_local_size += int(rank < (full_size % world_size)) lowercase__ : List[str] = sum(1 for _ in dataloader) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''') if __name__ == "__main__": main()
333
1
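# The expected_local_size check in the test above is plain remainder arithmetic: with
# 4 shards of 3 items (12 total) split across world_size = 8, ranks 0..3 get 2 items
# and ranks 4..7 get 1.
full_size, world_size = 4 * 3, 8
sizes = [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]
assert sizes == [2, 2, 2, 2, 1, 1, 1, 1] and sum(sizes) == full_size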
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer UpperCamelCase = ['''gpt2'''] UpperCamelCase = '''gpt2''' if is_tf_available(): class snake_case_ ( tf.Module ): def __init__( self : Optional[int] , lowercase_ : Optional[int] ) -> Dict: super().__init__() lowercase__ : List[str] = tokenizer lowercase__ : Any = AutoConfig.from_pretrained(lowercase_ ) lowercase__ : str = TFGPTaLMHeadModel.from_config(lowercase_ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def __UpperCamelCase ( self : List[str] , lowercase_ : Tuple ) -> str: lowercase__ : Dict = self.tokenizer(lowercase_ ) lowercase__ : Any = tokenized["input_ids"].to_tensor() lowercase__ : Dict = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowercase__ : List[Any] = self.model(input_ids=lowercase_ , attention_mask=lowercase_ )["logits"] return outputs @require_tf @require_keras_nlp class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Any ) -> List[Any]: super().setUp() lowercase__ : Optional[Any] = [GPTaTokenizer.from_pretrained(lowercase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowercase__ : Optional[Any] = [TFGPTaTokenizer.from_pretrained(lowercase_ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase__ : List[str] = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] lowercase__ : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __UpperCamelCase ( self : Any ) -> Optional[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: lowercase__ : Tuple = tokenizer([test_inputs] , return_tensors="tf" ) lowercase__ : str = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowercase__ : List[Any] = python_outputs[key].numpy() lowercase__ : Optional[Any] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowercase_ , tf.intaa ) == tf_outputs_values ) ) @slow def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: for tf_tokenizer in self.tf_tokenizers: lowercase__ : Union[str, Any] = tf.function(lowercase_ ) for test_inputs in self.test_sentences: lowercase__ : str = tf.constant(lowercase_ ) lowercase__ : List[str] = compiled_tokenizer(lowercase_ ) lowercase__ : Dict = tf_tokenizer(lowercase_ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __UpperCamelCase ( self : List[str] ) -> Any: for tf_tokenizer in self.tf_tokenizers: lowercase__ : str = ModelToSave(tokenizer=lowercase_ ) lowercase__ : Dict = 
tf.convert_to_tensor([self.test_sentences[0]] ) lowercase__ : str = model.serving(lowercase_ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase__ : List[str] = Path(lowercase_ ) / "saved.model" tf.saved_model.save(lowercase_ , lowercase_ , signatures={"serving_default": model.serving} ) lowercase__ : str = tf.saved_model.load(lowercase_ ) lowercase__ : List[str] = loaded_model.signatures["serving_default"](lowercase_ )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def __UpperCamelCase ( self : Optional[int] ) -> List[str]: for tf_tokenizer in self.tf_tokenizers: lowercase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase__ : List[str] = tf_tokenizer(lowercase_ ) # Build model with some sample inputs lowercase__ : str = tf_tokenizer.get_config() lowercase__ : List[Any] = TFGPTaTokenizer.from_config(lowercase_ ) lowercase__ : Any = model_from_config(lowercase_ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def __UpperCamelCase ( self : str ) -> Optional[Any]: for tf_tokenizer in self.tf_tokenizers: # for the test to run lowercase__ : List[str] = 12_31_23 for max_length in [3, 5, 10_24]: lowercase__ : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase__ : List[Any] = tf_tokenizer(lowercase_ , max_length=lowercase_ ) lowercase__ : List[str] = out["input_ids"].numpy().shape[1] assert out_length == max_length
333
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class snake_case_ ( __A ): __A : List[str] = "unispeech" def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) lowercase__ : List[str] = hidden_size lowercase__ : Any = feat_extract_norm lowercase__ : Optional[Any] = feat_extract_activation lowercase__ : Dict = list(lowercase_ ) lowercase__ : Union[str, Any] = list(lowercase_ ) lowercase__ : List[str] = list(lowercase_ ) lowercase__ : List[str] = conv_bias lowercase__ : Any = num_conv_pos_embeddings lowercase__ : Dict = num_conv_pos_embedding_groups lowercase__ : int = len(self.conv_dim ) lowercase__ : str = num_hidden_layers lowercase__ : Any = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : int = num_attention_heads lowercase__ : Union[str, Any] = hidden_dropout lowercase__ : Any = attention_dropout lowercase__ : Union[str, Any] = activation_dropout lowercase__ : Any = feat_proj_dropout lowercase__ : str = final_dropout lowercase__ : int = layerdrop lowercase__ : Optional[int] = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : Any = num_ctc_classes lowercase__ : int = vocab_size lowercase__ : str = do_stable_layer_norm lowercase__ : Any = use_weighted_layer_sum lowercase__ : Dict = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ : List[Any] = apply_spec_augment lowercase__ : Dict = mask_time_prob lowercase__ : Tuple = mask_time_length lowercase__ : str = mask_time_min_masks lowercase__ : List[Any] = mask_feature_prob lowercase__ : int = mask_feature_length lowercase__ : Optional[int] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__ : Optional[int] = num_codevectors_per_group lowercase__ : List[str] = num_codevector_groups lowercase__ : Dict = contrastive_logits_temperature lowercase__ : Tuple = feat_quantizer_dropout lowercase__ : Any = num_negatives lowercase__ : Dict = codevector_dim lowercase__ : Tuple = proj_codevector_dim lowercase__ : List[str] = diversity_loss_weight # ctc loss lowercase__ : Tuple = ctc_loss_reduction lowercase__ : Dict = ctc_zero_infinity # pretraining loss lowercase__ : Optional[Any] = replace_prob @property def __UpperCamelCase ( self : Dict ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
333
1
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    # Built-in potential of a pn junction: V_bi = (kT / q) * ln(Nd * Na / ni^2)
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
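# A worked call of builtin_voltage above with textbook-ish silicon numbers (assumed
# here, not from the original): Nd = Na = 1e17 cm^-3 and ni = 1e10 cm^-3 at 300 K give
# V_bi = (kT/q) * ln(1e14), roughly 0.83 V.
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
assert 0.8 < v_bi < 0.9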
def cocktail_shaker_sort(unsorted: list) -> list:
    # Bidirectional bubble sort: each outer pass bubbles the minimum leftward, then
    # the maximum rightward, shrinking the unsorted window from the right.
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
333
1
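# One-shot check of cocktail_shaker_sort above: duplicates are handled and the early
# exit fires once a full pass makes no swaps.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]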
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # cheapest way to cover every travel day from `index` to the end of the year
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
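# A worked call of mincost_tickets above, the classic minimum-cost-tickets instance:
# with 1/7/30-day passes costing 2/7/15, day 1 goes on a 1-day pass, days 4..8 on a
# 7-day pass and day 20 on a 1-day pass, for a total of 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11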
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class snake_case_ ( __A ): __A : int = "token-classification" def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]: if type(lowercase_ ) == dict: lowercase__ : Dict = Namespace(**lowercase_ ) lowercase__ : str = import_module("tasks" ) try: lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type ) lowercase__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels ) lowercase__ : int = CrossEntropyLoss().ignore_index super().__init__(lowercase_ , len(self.labels ) , self.mode ) def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any: return self.model(**lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple: lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : Tuple = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : Optional[int] = self(**lowercase_ ) lowercase__ : Union[str, Any] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: lowercase__ : Tuple = self.hparams for mode in ["train", "dev", "test"]: lowercase__ : Any = self._feature_file(lowercase_ ) if os.path.exists(lowercase_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ ) lowercase__ : Dict = self.token_classification_task.convert_examples_to_features( lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("Saving features into cached file %s" , lowercase_ ) torch.save(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader: lowercase__ : str = self._feature_file(lowercase_ ) logger.info("Loading features from cached file %s" , lowercase_ ) lowercase__ : str = torch.load(lowercase_ ) lowercase__ : List[str] = 
torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str: """Compute validation""" "" lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": lowercase__ : int = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids lowercase__ : List[Any] = self(**lowercase_ ) lowercase__ , lowercase__ : Any = outputs[:2] lowercase__ : Optional[Any] = logits.detach().cpu().numpy() lowercase__ : int = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]: lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean() lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 ) lowercase__ : Dict = np.argmax(lowercase_ , axis=2 ) lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowercase__ : Any = dict(enumerate(self.labels ) ) lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) lowercase__ : Any = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(lowercase_ , lowercase_ ), "precision": precision_score(lowercase_ , lowercase_ ), "recall": recall_score(lowercase_ , lowercase_ ), "f1": fa_score(lowercase_ , lowercase_ ), } lowercase__ : List[Any] = dict(results.items() ) lowercase__ : List[str] = results return ret, preds_list, out_label_list def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict: # when stable lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) lowercase__ : Any = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int: # updating to test_epoch_end instead of deprecated test_end lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 lowercase__ : Optional[int] = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple: # Add NER specific options BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ ) parser.add_argument( "--task_type" , default="NER" , 
type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" ) parser.add_argument( "--max_seq_length" , default=1_28 , type=lowercase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , ) parser.add_argument( "--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd()) UpperCamelCase = parser.parse_args() UpperCamelCase = NERTransformer(args) UpperCamelCase = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) UpperCamelCase = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
333
1
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    # LSD radix sort: bucket by one decimal digit per pass, least significant first.
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next placement
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
333
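# One stable pass per decimal digit of radix_sort above (name restored as assumed):
# the largest value here has three digits, so three passes fully order the list.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]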
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { '''configuration_mask2former''': [ '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Mask2FormerConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Mask2FormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Mask2FormerForUniversalSegmentation''', '''Mask2FormerModel''', '''Mask2FormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
333
1
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
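A quick way to see the refinenet index remapping performed inside rename_key above (a standalone sketch, not part of the conversion script):

# Illustrative check: refinenet indices run 4..1 in the original checkpoint but
# map to fusion_stage.layers.0..3 in the converted model via abs(layer_idx - 4).
for layer_idx in (4, 3, 2, 1):
    print(f"refinenet{layer_idx} -> fusion_stage.layers.{abs(layer_idx - 4)}")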
333
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
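A quick sanity check of the sigmoid and cross-entropy cost defined above (a minimal standalone sketch; the toy values are illustrative, not from the source):

import numpy as np

def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))

def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

h = sigmoid_function(np.array([0.0, 2.0, -2.0]))  # -> approx [0.5, 0.881, 0.119]
y = np.array([0.0, 1.0, 0.0])
print(cost_function(h, y))  # modest loss (~0.32), since the predictions roughly match the labels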
333
1
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())

    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
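A non-interactive sketch of the same bisection idea, with hardcoded inputs (find_by_bisection and the values 0, 1000, 17 are illustrative, not from the source):

def find_by_bisection(lower: int, higher: int, to_guess: int) -> int:
    # Repeatedly halve the interval, exactly as guess_the_number does via get_avg/answer.
    while True:
        number = (lower + higher) // 2
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return number

print(find_by_bisection(0, 1000, 17))  # -> 17, reached in O(log n) steps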
333
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
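A usage sketch for the template above (assuming this module is exposed as datasets.tasks, as the relative imports suggest; the column names are illustrative):

from datasets.tasks import TextClassification

task = TextClassification(text_column="review", label_column="sentiment")
print(task.column_mapping)  # -> {'review': 'text', 'sentiment': 'labels'}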
333
1
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California housing dataset is used to demonstrate the regressor.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
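A manual cross-check of the two metrics reported above (standalone sketch; the toy arrays are illustrative, not from the source):

import numpy as np

y_true = np.array([2.0, 3.5, 1.0])
y_pred = np.array([2.5, 3.0, 1.5])
mae = np.abs(y_true - y_pred).mean()   # mean absolute error -> 0.5
mse = ((y_true - y_pred) ** 2).mean()  # mean squared error  -> 0.25
print(mae, mse)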
333
333
1
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
333
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
333
1
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
333
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
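Why the manual path in step_model divides the loss by gradient_accumulation_steps: backpropagating each micro-batch loss scaled by 1/steps accumulates the same total gradient as one backward pass on the mean loss, since gradients are linear in the loss. A standalone numeric sketch (the values are illustrative):

losses = [4.0, 2.0]                               # per-micro-batch losses
steps = 2                                         # gradient_accumulation_steps
summed_scaled = sum(l / steps for l in losses)    # what the scaled backward passes accumulate
mean_loss = sum(losses) / len(losses)             # what one big batch would give
assert summed_scaled == mean_loss                 # 3.0 == 3.0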
333
1
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
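The dtype-to-byte-size mapping above follows the onnx.TensorProto data type enum: 1 = FLOAT (4 bytes), 6 = INT32 (4), 7 = INT64 (8), 11 = DOUBLE (8). A standalone sketch of the size computation (the dims are illustrative):

import numpy

BYTES_PER_DTYPE = {1: 4, 6: 4, 7: 8, 11: 8}  # onnx.TensorProto enum -> element size
dims, dtype = (1024, 1024), 1                 # hypothetical float32 initializer
mem_size = numpy.prod(dims) * BYTES_PER_DTYPE[dtype]
print(mem_size / 1024 / 1024, "MB")           # -> 4.0 MB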
333
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
333
1
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # Push any pending lazy value down before working on this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # Push any pending lazy value down before answering from this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
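Cross-checking the demo queries above against a naive linear scan over the same data (the queries are 1-indexed and inclusive, hence the slice offsets):

A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
print(max(A[3:6]))   # query(1, 1, size, 4, 6)  -> 7
print(max(A[6:11]))  # query(1, 1, size, 7, 11) -> 14
print(max(A[6:12]))  # query(1, 1, size, 7, 12) -> 15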
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class snake_case_ ( __A ): __A : Optional[int] = "rwkv" __A : List[str] = {"max_position_embeddings": "context_length"} def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int: lowercase__ : List[str] = vocab_size lowercase__ : str = context_length lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size lowercase__ : List[Any] = layer_norm_epsilon lowercase__ : str = rescale_every lowercase__ : Optional[int] = use_cache lowercase__ : int = bos_token_id lowercase__ : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
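# A minimal usage sketch for the configuration above, assuming it corresponds
# to the public `RwkvConfig` class in transformers (an assumption based on the
# "rwkv" model_type and the checkpoint map).
from transformers import RwkvConfig

config = RwkvConfig()
print(config.model_type)      # "rwkv"
print(config.context_length)  # 1024

# attention_hidden_size and intermediate_size fall back to hidden_size-derived
# defaults when not given explicitly:
small = RwkvConfig(hidden_size=512, num_hidden_layers=12)
print(small.attention_hidden_size)  # 512  (defaults to hidden_size)
print(small.intermediate_size)      # 2048 (defaults to 4 * hidden_size)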
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class snake_case_ ( __A ): __A : Optional[Any] = VOCAB_FILES_NAMES __A : Any = PRETRAINED_VOCAB_FILES_MAP __A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : List[str] = ["input_ids", "attention_mask"] __A : Dict = RobertaTokenizer def __init__( self : Optional[int] , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None , lowercase_ : List[Any]="replace" , lowercase_ : Union[str, Any]="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Tuple="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Optional[int]="<mask>" , lowercase_ : List[Any]=False , lowercase_ : Optional[Any]=True , **lowercase_ : List[str] , ) -> Any: super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , 
sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , ) lowercase__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space: lowercase__ : Optional[Any] = getattr(lowercase_ , pre_tok_state.pop("type" ) ) lowercase__ : Optional[int] = add_prefix_space lowercase__ : int = pre_tok_class(**lowercase_ ) lowercase__ : List[Any] = add_prefix_space lowercase__ : Optional[Any] = "post_processor" lowercase__ : List[Any] = getattr(self.backend_tokenizer , lowercase_ , lowercase_ ) if tokenizer_component_instance: lowercase__ : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ : str = tuple(state["sep"] ) if "cls" in state: lowercase__ : List[str] = tuple(state["cls"] ) lowercase__ : Optional[int] = False if state.get("add_prefix_space" , lowercase_ ) != add_prefix_space: lowercase__ : List[Any] = add_prefix_space lowercase__ : str = True if state.get("trim_offsets" , lowercase_ ) != trim_offsets: lowercase__ : List[str] = trim_offsets lowercase__ : Tuple = True if changes_to_apply: lowercase__ : Dict = getattr(lowercase_ , state.pop("type" ) ) lowercase__ : List[Any] = component_class(**lowercase_ ) setattr(self.backend_tokenizer , lowercase_ , lowercase_ ) @property def __UpperCamelCase ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __UpperCamelCase ( self : Any , lowercase_ : Tuple ) -> Optional[int]: lowercase__ : Tuple = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else value lowercase__ : str = value def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[Any] ) -> BatchEncoding: lowercase__ : Dict = kwargs.get("is_split_into_words" , lowercase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Dict , *lowercase_ : Any , **lowercase_ : Any ) -> BatchEncoding: lowercase__ : Optional[Any] = kwargs.get("is_split_into_words" , lowercase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Dict , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]: lowercase__ : Any = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : Tuple=None ) -> Optional[Any]: lowercase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]: lowercase__ : Tuple = [self.sep_token_id] lowercase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
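# A short usage sketch, assuming the class above corresponds to the public
# `RobertaTokenizerFast` in transformers; the checkpoint name is illustrative.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tokenizer("Hello world")
print(enc.input_ids)  # framed as <s> ... </s> by build_inputs_with_special_tokens

# Pretokenized input requires add_prefix_space=True, per the assertions in the
# _batch_encode_plus/_encode_plus overrides above.
pretok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
print(pretok(["Hello", "world"], is_split_into_words=True).input_ids)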
class snake_case_ : def __init__( self : int ) -> Optional[int]: lowercase__ : Optional[int] = 0 lowercase__ : List[str] = 0 lowercase__ : Any = {} def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]: if vertex not in self.adjacency: lowercase__ : List[Any] = {} self.num_vertices += 1 def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]: self.add_vertex(lowercase_ ) self.add_vertex(lowercase_ ) if head == tail: return lowercase__ : int = weight lowercase__ : Any = weight def __UpperCamelCase ( self : Dict ) -> Optional[int]: lowercase__ : List[Any] = self.get_edges() for edge in edges: lowercase__ , lowercase__ , lowercase__ : int = edge edges.remove((tail, head, weight) ) for i in range(len(lowercase_ ) ): lowercase__ : Tuple = list(edges[i] ) edges.sort(key=lambda lowercase_ : e[2] ) for i in range(len(lowercase_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowercase__ : int = edges[i][2] + 1 for edge in edges: lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge lowercase__ : Union[str, Any] = weight lowercase__ : Dict = weight def __str__( self : str ) -> Any: lowercase__ : str = "" for tail in self.adjacency: for head in self.adjacency[tail]: lowercase__ : Optional[Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Any = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __UpperCamelCase ( self : List[str] ) -> Dict: return self.adjacency.keys() @staticmethod def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]: lowercase__ : Any = Graph() if vertices is None: lowercase__ : str = [] if edges is None: lowercase__ : List[Any] = [] for vertex in vertices: g.add_vertex(lowercase_ ) for edge in edges: g.add_edge(*lowercase_ ) return g class snake_case_ : def __init__( self : int ) -> List[str]: lowercase__ : Dict = {} lowercase__ : Tuple = {} def __len__( self : Union[str, Any] ) -> Union[str, Any]: return len(self.parent ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple: if item in self.parent: return self.find(lowercase_ ) lowercase__ : Union[str, Any] = item lowercase__ : int = 0 return item def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any: if item not in self.parent: return self.make_set(lowercase_ ) if item != self.parent[item]: lowercase__ : Union[str, Any] = self.find(self.parent[item] ) return self.parent[item] def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]: lowercase__ : Dict = self.find(lowercase_ ) lowercase__ : Optional[int] = self.find(lowercase_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowercase__ : Dict = roota return roota if self.rank[roota] < self.rank[roota]: lowercase__ : int = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowercase__ : Tuple = roota return roota return None @staticmethod def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]: lowercase__ : List[Any] = graph.num_vertices lowercase__ : Optional[Any] = Graph.UnionFind() lowercase__ : int = [] while num_components > 1: lowercase__ : List[Any] = {} for vertex in graph.get_vertices(): lowercase__ : Any = -1 lowercase__ : List[str] = graph.get_edges() for edge in edges: lowercase__ 
, lowercase__ , lowercase__ : str = edge edges.remove((tail, head, weight) ) for edge in edges: lowercase__ , lowercase__ , lowercase__ : List[str] = edge lowercase__ : List[str] = union_find.find(lowercase_ ) lowercase__ : Union[str, Any] = union_find.find(lowercase_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : Dict = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex] if union_find.find(lowercase_ ) != union_find.find(lowercase_ ): union_find.union(lowercase_ , lowercase_ ) mst_edges.append(cheap_edge[vertex] ) lowercase__ : Optional[Any] = num_components - 1 lowercase__ : List[Any] = Graph.build(edges=lowercase_ ) return mst
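# The chunk above implements Boruvka's MST with a union-find, but the
# identifier collisions (e.g. "roota == roota", "seta != seta") hide the
# intent. Below is a self-contained sketch of the same cheapest-edge round
# structure, with illustrative names rather than the original's:
def boruvka_mst(num_vertices: int, edges: list) -> list:
    parent = list(range(num_vertices))

    def find(v: int) -> int:
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    mst = []
    components = num_vertices
    while components > 1:
        # one Boruvka round: find the cheapest outgoing edge per component
        cheapest = [None] * num_vertices
        for tail, head, weight in edges:
            rt, rh = find(tail), find(head)
            if rt != rh:
                if cheapest[rt] is None or cheapest[rt][2] > weight:
                    cheapest[rt] = (tail, head, weight)
                if cheapest[rh] is None or cheapest[rh][2] > weight:
                    cheapest[rh] = (tail, head, weight)
        merged_any = False
        for edge in cheapest:
            if edge is not None:
                rt, rh = find(edge[0]), find(edge[1])
                if rt != rh:
                    parent[rt] = rh  # union the two components
                    mst.append(edge)
                    components -= 1
                    merged_any = True
        if not merged_any:  # disconnected graph: no spanning tree exists
            break
    return mst


print(boruvka_mst(4, [(0, 1, 1.0), (1, 2, 2.0), (2, 3, 3.0), (0, 3, 4.0)]))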
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list.

    Reconstructed from the obfuscated original, in which every method shared
    the same name and the assignment targets were lost.
    """

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
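# Usage sketch for the queue above.
q = CircularQueue(3)
q.enqueue("a").enqueue("b").enqueue("c")
print(len(q))       # 3
print(q.dequeue())  # "a"
q.enqueue("d")      # rear wraps around modulo n
print(q.first())    # "b"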
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : str): lowercase__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: lowercase__ : str = 1024 lowercase__ : List[str] = 4096 lowercase__ : List[Any] = 24 lowercase__ : Dict = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : Any = [256, 512, 1024, 1024] lowercase__ : Optional[int] = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Union[str, Any] = True lowercase__ : Tuple = 150 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : str = "ade20k-id2label.json" lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder") if "pretrained.model" in name: lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings") if "patch_embed" in name: lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings") if "pos_embed" in name: lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings") if "attn.proj" in name: lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense") if "proj" in name and "project" not in name: lowercase__ : int = name.replace("proj" , "projection") if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layer") if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if "norm1" in name: lowercase__ : List[str] = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "layernorm_after") if "scratch.output_conv" in name: lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head") if "scratch" in name: lowercase__ : str = name.replace("scratch" , "neck") if "layer1_rn" in name: lowercase__ : int = name.replace("layer1_rn" , "convs.0") if "layer2_rn" in name: lowercase__ : int = name.replace("layer2_rn" , "convs.1") if "layer3_rn" in name: lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2") if "layer4_rn" in name: lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3") if "refinenet" in name: lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''') if "out_conv" in 
name: lowercase__ : str = name.replace("out_conv" , "projection") if "resConfUnit1" in name: lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1") if "resConfUnit2" in name: lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2") if "conv1" in name: lowercase__ : List[Any] = name.replace("conv1" , "convolution1") if "conv2" in name: lowercase__ : Tuple = name.replace("conv2" , "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: lowercase__ : Any = name.replace("pretrained" , "dpt") if "bn" in name: lowercase__ : str = name.replace("bn" , "batch_norm") if "head" in name: lowercase__ : Optional[Any] = name.replace("head" , "head.head") if "encoder.norm" in name: lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm") if "auxlayer" in name: lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head") return name def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''') lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :] lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size] lowercase__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ 
-config.hidden_size :, : ] lowercase__ : int = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict): lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase) # load original state_dict from URL lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu") # remove certain keys remove_ignore_keys_(_lowerCamelCase) # rename keys for key in state_dict.copy().keys(): lowercase__ : List[str] = state_dict.pop(_lowerCamelCase) lowercase__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase) model.load_state_dict(_lowerCamelCase) model.eval() # Check outputs on an image lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384 lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase) lowercase__ : List[str] = prepare_img() lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt") # forward pass lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth # Assert logits lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(_lowerCamelCase) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase) ) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
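# Hedged invocation sketch for the conversion entry point defined above; the
# function is called positionally because the original parameter names are not
# recoverable here, and the output path is illustrative.
convert_dpt_checkpoint(
    "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
    "./dpt-large",  # pytorch_dump_folder_path
    False,          # push_to_hub
    "dpt-large",    # model_name
)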
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    # Reconstructed from the obfuscated original, in which both equations and
    # all coefficients collapsed onto the same identifier.
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
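# Worked example: x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)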
def solution(numerator: int = 1, digit: int = 1000) -> int:
    # Reconstructed from the obfuscated original, in which both parameters
    # shared the same name. Finds the d < digit whose unit fraction
    # numerator/d has the longest recurring decimal cycle, by tracking the
    # remainders of repeated long division until one repeats.
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
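# Worked example (Project Euler 26): among d < 10, 1/7 = 0.(142857) has the
# longest recurring cycle, so the function returns 7.
print(solution(1, 10))  # 7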
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def lowercase_ ( _lowerCamelCase : Optional[int]): return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) def lowercase_ ( _lowerCamelCase : List[str]): lowercase__ : List[Any] = create_tensor(_lowerCamelCase) lowercase__ : Any = gather(_lowerCamelCase) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1)) def lowercase_ ( _lowerCamelCase : str): lowercase__ : List[str] = [state.process_index] lowercase__ : Tuple = gather_object(_lowerCamelCase) assert len(_lowerCamelCase) == state.num_processes, f'''{gathered_obj}, {len(_lowerCamelCase)} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes)), f'''{gathered_obj} != {list(range(state.num_processes))}''' def lowercase_ ( _lowerCamelCase : Optional[Any]): lowercase__ : Optional[int] = create_tensor(_lowerCamelCase) lowercase__ : Optional[Any] = broadcast(_lowerCamelCase) assert broadcasted_tensor.shape == torch.Size([state.num_processes]) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1)) def lowercase_ ( _lowerCamelCase : str): # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: lowercase__ : int = torch.arange(state.num_processes + 1).to(state.device) else: lowercase__ : int = torch.arange(state.num_processes).to(state.device) lowercase__ : Tuple = pad_across_processes(_lowerCamelCase) assert padded_tensor.shape == torch.Size([state.num_processes + 1]) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes)) + [0] def lowercase_ ( _lowerCamelCase : List[Any]): # For now runs on only two processes if state.num_processes != 2: return lowercase__ : List[Any] = create_tensor(_lowerCamelCase) lowercase__ : List[str] = reduce(_lowerCamelCase , "sum") lowercase__ : Union[str, Any] = torch.tensor([4.0, 6]).to(state.device) assert torch.allclose(_lowerCamelCase , _lowerCamelCase), f'''{reduced_tensor} != {truth_tensor}''' def lowercase_ ( _lowerCamelCase : str): # For now runs on only two processes if state.num_processes != 2: return lowercase__ : Optional[Any] = create_tensor(_lowerCamelCase) lowercase__ : List[str] = reduce(_lowerCamelCase , "mean") lowercase__ : List[Any] = torch.tensor([2.0, 3]).to(state.device) assert torch.allclose(_lowerCamelCase , _lowerCamelCase), f'''{reduced_tensor} != {truth_tensor}''' def lowercase_ ( _lowerCamelCase : List[Any]): # For xla_spawn (TPUs) main() def lowercase_ ( ): lowercase__ : Union[str, Any] = PartialState() state.print(f'''State: {state}''') state.print("testing gather") test_gather(_lowerCamelCase) state.print("testing gather_object") test_gather_object(_lowerCamelCase) state.print("testing broadcast") 
test_broadcast(_lowerCamelCase) state.print("testing pad_across_processes") test_pad_across_processes(_lowerCamelCase) state.print("testing reduce_sum") test_reduce_sum(_lowerCamelCase) state.print("testing reduce_mean") test_reduce_mean(_lowerCamelCase) if __name__ == "__main__": main()
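# Hedged sketch: the checks above are written to run under multiple processes.
# One way is accelerate's notebook_launcher; the module name `test_ops` for
# this file is an assumption.
from accelerate import notebook_launcher
from test_ops import main

notebook_launcher(main, num_processes=2)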
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ): __A : int = StableUnCLIPPipeline __A : int = TEXT_TO_IMAGE_PARAMS __A : Any = TEXT_TO_IMAGE_BATCH_PARAMS __A : int = TEXT_TO_IMAGE_IMAGE_PARAMS __A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __A : int = False def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : str = 32 lowercase__ : Any = embedder_hidden_size # prior components torch.manual_seed(0 ) lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : List[str] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : Any = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , ) torch.manual_seed(0 ) lowercase__ : Union[str, Any] = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ ) lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowercase__ : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowercase__ : str = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0 ) lowercase__ : Any = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL() lowercase__ : List[Any] = { # prior components "prior_tokenizer": 
prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any: if str(lowercase_ ).startswith("mps" ): lowercase__ : Any = torch.manual_seed(lowercase_ ) else: lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : Union[str, Any] = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: lowercase__ : str = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowercase_ ) @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Tuple ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : int ) -> int: lowercase__ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" ) lowercase__ : Optional[int] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) lowercase__ : int = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase__ : str = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) lowercase__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
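# With the lazy module above in place, end users import the CANINE classes
# directly from transformers; a sketch (weights are randomly initialized):
from transformers import CanineConfig, CanineModel, CanineTokenizer

config = CanineConfig()
model = CanineModel(config)
tokenizer = CanineTokenizer()  # CANINE tokenizes raw characters, no vocab file needed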
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False): try: lowercase__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowercase_ ( _lowerCamelCase : int): return unittest.skip("Test was skipped")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
List[Any]=None , _lowerCamelCase : Dict=None): if test_case is None: return partial(_lowerCamelCase , version=_lowerCamelCase) return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase) class snake_case_ ( unittest.TestCase ): __A : int = True @classmethod def __UpperCamelCase ( cls : str ) -> str: lowercase__ : str = tempfile.mkdtemp() @classmethod def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCamelCase ( self : str ) -> Optional[int]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str: lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = AcceleratorState() lowercase__ : Optional[int] = tensor[None].clone().to(state.device) lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu() lowercase__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _lowerCamelCase): return False return True class snake_case_ : def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]: lowercase__ : int = returncode lowercase__ : Dict = stdout lowercase__ : List[Any] = stderr async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str): while True: lowercase__ : int = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Tuple = [] lowercase__ : List[Any] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True): lowercase__ : Optional[Any] = asyncio.get_event_loop() lowercase__ : List[Any] = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : str = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Dict = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False): try: lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_lowerCamelCase , "decode"): lowercase__ : Optional[Any] = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
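# Hedged sketch using the public accelerate.test_utils.testing names that the
# obfuscated helpers above correspond to (an assumption).
import unittest

from accelerate.test_utils.testing import require_cuda, run_command


class ExampleTest(unittest.TestCase):
    @require_cuda  # skips unless torch.cuda.is_available()
    def test_subprocess(self):
        out = run_command(["python", "-c", "print('ok')"], return_stdout=True)
        assert out.strip() == "ok"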
class snake_case_ : def __init__( self : List[Any] , lowercase_ : str = "" , lowercase_ : bool = False ) -> None: # Mapping from the first character of the prefix of the node lowercase__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word lowercase__ : Union[str, Any] = is_leaf lowercase__ : Union[str, Any] = prefix def __UpperCamelCase ( self : str , lowercase_ : str ) -> tuple[str, str, str]: lowercase__ : List[str] = 0 for q, w in zip(self.prefix , lowercase_ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def __UpperCamelCase ( self : List[Any] , lowercase_ : list[str] ) -> None: for word in words: self.insert(lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str ) -> None: # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: lowercase__ : Dict = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowercase__ : List[str] = RadixNode(prefix=lowercase_ , is_leaf=lowercase_ ) else: lowercase__ : Dict = self.nodes[word[0]] lowercase__ , lowercase__ , lowercase__ : Optional[Any] = incoming_node.match( lowercase_ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowercase_ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowercase__ : Optional[Any] = remaining_prefix lowercase__ : Dict = self.nodes[matching_string[0]] lowercase__ : Optional[Any] = RadixNode(lowercase_ , lowercase_ ) lowercase__ : Tuple = aux_node if remaining_word == "": lowercase__ : int = True else: self.nodes[matching_string[0]].insert(lowercase_ ) def __UpperCamelCase ( self : Any , lowercase_ : str ) -> bool: lowercase__ : Optional[int] = self.nodes.get(word[0] , lowercase_ ) if not incoming_node: return False else: lowercase__ , lowercase__ , lowercase__ : Optional[Any] = incoming_node.match( lowercase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : str ) -> bool: lowercase__ : Dict = self.nodes.get(word[0] , lowercase_ ) if not incoming_node: return False else: lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = incoming_node.match( lowercase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowercase_ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowercase__ : Optional[int] = list(self.nodes.values() )[0] lowercase__ : Dict = merging_node.is_leaf self.prefix += merging_node.prefix lowercase__ : int = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf 
elif len(incoming_node.nodes ) > 1: lowercase__ : Union[str, Any] = False # If there is 1 edge, we merge it with its child else: lowercase__ : Optional[int] = list(incoming_node.nodes.values() )[0] lowercase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowercase__ : Any = merging_node.nodes return True def __UpperCamelCase ( self : Tuple , lowercase_ : int = 0 ) -> None: if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def lowercase_ ( ): lowercase__ : Optional[int] = "banana bananas bandana band apple all beast".split() lowercase__ : List[Any] = RadixNode() root.insert_many(_lowerCamelCase) assert all(root.find(_lowerCamelCase) for word in words) assert not root.find("bandanas") assert not root.find("apps") root.delete("all") assert not root.find("all") root.delete("banana") assert not root.find("banana") assert root.find("bananas") return True def lowercase_ ( ): assert test_trie() def lowercase_ ( ): lowercase__ : str = RadixNode() lowercase__ : List[Any] = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(_lowerCamelCase) print("Words:" , _lowerCamelCase) print("Tree:") root.print_tree() if __name__ == "__main__": main()
# This file provides dummy placeholder objects that raise an informative error when
# `flax` is not installed. It is normally autogenerated (`make fix-copies`); do not edit.
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were lost in extraction. `FlaxPlaceholderModel` is a
# hypothetical stand-in; the source file defines thirteen classes, each repeating this
# exact three-method dummy pattern with a different public Flax class name.
class FlaxPlaceholderModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
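A minimal usage sketch (not part of the original file; it assumes the tools API of transformers v4.29+, where this class is exposed, and downloads the BART-MNLI checkpoint lazily on first call):

from transformers.tools import TextClassificationTool  # import path assumed; may vary by version

classifier = TextClassificationTool()
# the tool initializes its tokenizer and model on the first call
print(classifier("This movie was a complete waste of time.", labels=["positive", "negative"]))
# expected output: "negative"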
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
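A brief configuration sketch (illustrative; relies only on the class defined above):

config = ViTMAEConfig(mask_ratio=0.6)  # keep 40% of patches visible during pretraining
print(config.hidden_size, config.decoder_hidden_size, config.mask_ratio)  # 768 512 0.6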
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwargs key below is kept as in the original source
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of ``a`` modulo ``m`` using the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
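A quick illustrative check of the two functions above (values chosen for demonstration only): since 7 * 15 = 105 = 4 * 26 + 1, the inverse of 7 modulo 26 is 15.

assert gcd(48, 18) == 6
inverse = find_mod_inverse(7, 26)
assert inverse == 15 and (7 * inverse) % 26 == 1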
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
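A minimal usage sketch (not from the original file; the checkpoint is the standard public CLIP checkpoint and is downloaded on first use):

from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # stand-in for a real image
batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']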
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation:
    # https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
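A worked example for the function above (illustrative): 25 = 0b011001 and 32 = 0b100000, so OR-ing the zero-padded digit columns yields 0b111001 = 57.

assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == 25 | 32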
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Use Zeller's congruence to find the day of the week for a Gregorian date (mm-dd-yyyy or mm/dd/yyyy)."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
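An illustrative call of the function above: 31 January 2010 fell on a Sunday.

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!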
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if ``string`` can be segmented into a sequence of one or more ``words``,
    using a trie over the word list plus memoized dynamic programming over start indices.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
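Two illustrative cases for the function above: the first string splits as "apple pen apple"; the second is the classic counterexample with no valid segmentation.

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False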
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [],
    "automatic-speech-recognition": [], "token-classification": [], "sentence-similarity": [],
    "audio-classification": [], "question-answering": [], "summarization": [],
    "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [],
    "multiple-choice": [], "text-classification": [], "text-to-image": [], "text2text-generation": [],
    "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [],
    "image-to-image": [], "tabular-to-text": [], "unconditional-image-generation": [],
    "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [],
    "text-generation": [], "conversational": [], "table-question-answering": [],
    "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [],
    "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by sweeping alternately right-to-left and left-to-right (bidirectional bubble sort)."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
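An illustrative check of the sort above; note that the `swapped` early-exit lets an already-sorted input finish in a single O(n) pass.

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([1, 2, 3]) == [1, 2, 3]  # best case: one pass, no swaps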
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
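# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The metrics computed in `_eval_end` above are entity-level scores from `seqeval`,
# which operates on lists of BIO tag sequences (one inner list per sentence).
# A minimal, self-contained demo with hypothetical tags:
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score

y_true = [["B-PER", "I-PER", "O", "B-LOC"], ["O", "B-ORG"]]  # hypothetical gold tags
y_pred = [["B-PER", "I-PER", "O", "B-LOC"], ["O", "O"]]      # hypothetical predictions

print("accuracy :", accuracy_score(y_true, y_pred))   # token-level accuracy
print("precision:", precision_score(y_true, y_pred))  # entity-level precision
print("recall   :", recall_score(y_true, y_pred))     # entity-level recall
print("f1       :", f1_score(y_true, y_pred))         # entity-level F1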
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
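# --- Illustrative sketch (added by the editor, not part of the original file) ---
# A minimal stand-in showing the idea behind `_LazyModule`: submodules are only
# imported when one of their symbols is first accessed. This is a sketch, not the
# real implementation; the structure mirrors the `_import_structure` dict above.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        if name in self._symbol_to_module:
            # Import the defining submodule lazily, then pull the symbol from it.
            module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
            return getattr(module, name)
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")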
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """DFS over the binary include/exclude decision tree for each element."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
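# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The recursion above enumerates all 2**n subsequences via one include/exclude
# decision per element. The same enumeration can be written iteratively with
# bitmasks, where bit i of the mask decides whether element i is included:
from typing import Any


def subsequences_by_bitmask(sequence: list[Any]) -> list[list[Any]]:
    n = len(sequence)
    return [[sequence[i] for i in range(n) if mask >> i & 1] for mask in range(1 << n)]


assert len(subsequences_by_bitmask([3, 1, 2, 4])) == 2**4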
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
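# --- Illustrative sketch (added by the editor, not part of the original file) ---
# A quick numerical sanity check, on hypothetical random data, that the analytic
# gradient used in `logistic_reg`, x.T @ (h - y) / m, matches a central
# finite-difference approximation of the mean cross-entropy loss:
import numpy as np

rng = np.random.default_rng(0)
x_check = rng.normal(size=(20, 2))
y_check = (rng.random(20) > 0.5) * 1
theta_check = rng.normal(size=2)


def loss_at(theta):
    h = 1 / (1 + np.exp(-x_check @ theta))
    return (-y_check * np.log(h) - (1 - y_check) * np.log(1 - h)).mean()


analytic = x_check.T @ (1 / (1 + np.exp(-x_check @ theta_check)) - y_check) / y_check.size
eps = 1e-6
numeric = np.array(
    [(loss_at(theta_check + eps * e) - loss_at(theta_check - eps * e)) / (2 * eps) for e in np.eye(2)]
)
assert np.allclose(analytic, numeric, atol=1e-5)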
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file: ids are Unicode codepoints.
        return ()
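# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The tokenizer above is purely character-level: tokenization is `list(text)` and
# ids are Unicode codepoints via ord()/chr(). The round trip can be shown without
# any model assets:
text = "héllo"
tokens = list(text)                           # what _tokenize does
ids = [ord(t) for t in tokens]                # what _convert_token_to_id does
assert "".join(chr(i) for i in ids) == text   # _convert_id_to_token + convert_tokens_to_string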
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
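# --- Illustrative sketch (added by the editor, not part of the original file) ---
# A minimal demo of `align_with_features`, assuming `TextClassification` is
# importable (e.g. `from datasets.tasks import TextClassification` in `datasets`
# releases that still ship task templates). The label names are hypothetical.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification()
aligned = template.align_with_features(features)
# The copied template now carries the dataset's concrete label feature:
print(aligned.label_schema["labels"].names)  # -> ['neg', 'pos']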
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
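# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The conversion above is a plain dictionary rewrite: re-prefix keys, drop unused
# ones, keep the tensors as-is. The same pattern on a toy state dict with
# hypothetical keys and placeholder values:
toy_state_dict = {
    "roberta.encoder.layer.0.attention.self.query.weight": "W_q",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "unused",
}
converted = {}
for key, value in toy_state_dict.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # weights the target architecture does not use
    converted[key] = value
assert list(converted) == ["roberta_prelayernorm.encoder.layer.0.attention.self.query.weight"]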
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` when `option` is truthy, otherwise `max_val`."""
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the (floored) average of two integers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Bisection search for `to_guess` inside the open interval (lower, higher)."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Collect user input and run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
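# --- Illustrative sketch (added by the editor, not part of the original file) ---
# `guess_the_number` is a bisection search, so it needs at most roughly
# log2(higher - lower) probes. A non-interactive check of that bound, using the
# same halving logic as above:
import math


def count_probes(lower: int, higher: int, to_guess: int) -> int:
    probes = 0
    while True:
        probes += 1
        number = int((lower + higher) / 2)
        if number > to_guess:
            higher = number
        elif number < to_guess:
            lower = number
        else:
            return probes


assert count_probes(0, 1000, 355) <= math.ceil(math.log2(1000)) + 1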
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class snake_case_ : def __init__( self : Any , lowercase_ : Optional[Any] , lowercase_ : Dict=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Dict=True , lowercase_ : Any=True , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=99 , lowercase_ : int=64 , lowercase_ : List[Any]=32 , lowercase_ : List[Any]=5 , lowercase_ : Optional[Any]=4 , lowercase_ : Dict=37 , lowercase_ : int="gelu" , lowercase_ : Any=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=5_12 , lowercase_ : str=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=3 , lowercase_ : Tuple=4 , lowercase_ : Any=None , ) -> List[str]: lowercase__ : List[str] = parent lowercase__ : Dict = batch_size lowercase__ : str = seq_length lowercase__ : Any = is_training lowercase__ : Tuple = use_input_mask lowercase__ : List[str] = use_token_type_ids lowercase__ : int = use_labels lowercase__ : List[str] = vocab_size lowercase__ : int = hidden_size lowercase__ : List[str] = embedding_size lowercase__ : Tuple = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : Tuple = hidden_act lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : str = max_position_embeddings lowercase__ : Optional[Any] = type_vocab_size lowercase__ : Dict = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : List[Any] = num_labels lowercase__ : Union[str, Any] = num_choices lowercase__ : Optional[int] = scope def __UpperCamelCase ( self : List[str] ) -> Dict: lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Dict = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Optional[int] = None lowercase__ : List[str] = None lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str ) -> Any: lowercase__ : Optional[Any] = MegatronBertModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ ) lowercase__ : List[str] = model(lowercase_ , token_type_ids=lowercase_ ) lowercase__ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[Any] ) -> Optional[int]: lowercase__ : Dict = MegatronBertForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Dict = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Union[str, Any] ) -> Union[str, Any]: lowercase__ : Tuple = MegatronBertForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Any ) -> Any: lowercase__ : Tuple = MegatronBertForNextSentencePrediction(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ) -> str: lowercase__ : List[Any] = MegatronBertForPreTraining(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , next_sentence_label=lowercase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __UpperCamelCase ( 
self : Tuple , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> Dict: lowercase__ : List[Any] = MegatronBertForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Any = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] ) -> Tuple: lowercase__ : Optional[Any] = self.num_labels lowercase__ : List[Any] = MegatronBertForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[Any] ) -> str: lowercase__ : List[Any] = self.num_labels lowercase__ : Optional[int] = MegatronBertForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : int , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> Union[str, Any]: lowercase__ : Dict = self.num_choices lowercase__ : Dict = MegatronBertForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : List[Any] = model( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : Any ) -> Optional[int]: lowercase__ : List[str] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Optional[int] = config_and_inputs lowercase__ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : int = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) __A : str = ( { "feature-extraction": MegatronBertModel, 
"fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) __A : Optional[int] = True # test_resize_embeddings = False __A : int = False def __UpperCamelCase ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str=False ) -> Union[str, Any]: lowercase__ : List[str] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase__ : Dict = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ ) lowercase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def __UpperCamelCase ( self : List[Any] ) -> Tuple: lowercase__ : List[str] = MegatronBertModelTester(self ) lowercase__ : str = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def __UpperCamelCase ( self : Tuple ) -> int: self.config_tester.run_common_tests() def __UpperCamelCase ( self : List[Any] ) -> str: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowercase_ ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase_ ) def __UpperCamelCase ( self : int ) -> Dict: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase_ ) def __UpperCamelCase ( self : Dict ) -> Any: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase_ ) def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase_ ) def lowercase_ ( _lowerCamelCase : Dict): return torch.tensor( _lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , ) UpperCamelCase = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class snake_case_ ( unittest.TestCase ): @slow @unittest.skip("Model is not available." 
) def __UpperCamelCase ( self : str ) -> Any: lowercase__ : str = "nvidia/megatron-bert-uncased-345m" if "MYDIR" in os.environ: lowercase__ : Union[str, Any] = os.path.join(os.environ["MYDIR"] , lowercase_ ) lowercase__ : Any = MegatronBertModel.from_pretrained(lowercase_ ) model.to(lowercase_ ) model.half() lowercase__ : str = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): lowercase__ : Any = model(lowercase_ )[0] lowercase__ : Tuple = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , lowercase_ ) lowercase__ : Dict = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28] for ii in range(3 ): for jj in range(3 ): lowercase__ : Dict = output[0, ii, jj] lowercase__ : Tuple = expected[3 * ii + jj] lowercase__ : Dict = "ii={} jj={} a={} b={}".format(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) self.assertTrue(math.isclose(lowercase_ , lowercase_ , rel_tol=lowercase_ , abs_tol=lowercase_ ) , msg=lowercase_ )
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
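# --- Illustrative sketch (added by the editor, not part of the original script) ---
# Scraping by CSS class name is brittle: if Yahoo changes its markup, `soup.find`
# returns None and the chained `.find("span")` raises AttributeError. A more
# defensive variant of the same lookup (same URL and class name as above):
def stock_price_safe(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div is not None else None
    return span.text if span is not None else "N/A"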
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple): for param, grad_param in zip(model_a.parameters() , model_b.parameters()): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True): model.train() lowercase__ : Tuple = model(_lowerCamelCase) lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device)) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False): set_seed(42) lowercase__ : Dict = RegressionModel() lowercase__ : int = deepcopy(_lowerCamelCase) lowercase__ : str = RegressionDataset(length=80) lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16) model.to(accelerator.device) if sched: lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3) lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3) lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65) # Make a copy of `model` if sched: lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowercase_ ( _lowerCamelCase : Tuple): # Test when on a single CPU or GPU that the context manager does nothing lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): 
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Any): # Test on distributed setup that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase) # Use a single batch lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))] def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False): lowercase__ : int = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : str = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase 
, _lowerCamelCase) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters()): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))] GradientState._reset_state() def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False): lowercase__ : Dict = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase) for iteration, batch in enumerate(_lowerCamelCase): lowercase__ , lowercase__ : Any = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target)) lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)): if split_batches: sched.step() else: for _ in range(accelerator.num_processes): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCamelCase): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase)) if accelerator.num_processes > 1: check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) GradientState._reset_state() def lowercase_ ( ): lowercase__ : List[str] = Accelerator() lowercase__ : List[Any] = RegressionDataset(length=80) lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ : int = RegressionDataset(length=96) lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16) lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if iteration < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, 
_ in enumerate(_lowerCamelCase): assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase) if batch_num < len(_lowerCamelCase) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowercase_ ( ): lowercase__ : str = Accelerator() lowercase__ : Dict = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**") test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**") test_noop_sync(_lowerCamelCase) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**") test_distributed_sync(_lowerCamelCase) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Any): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import argparse

from tax import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def convert_tax_checkpoint_to_flax( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
    lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
    lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
    lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
    lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        lowercase__ : Any = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowercase__ : int = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowercase__ : Dict = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global'].")

    # Encoder
    for layer_index in range(config.num_layers):
        lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''

        # Self-Attention
        lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
        lowercase__ : Any = tax_attention_key
        lowercase__ : Any = tax_attention_out
        lowercase__ : Any = tax_attention_query
        lowercase__ : List[str] = tax_attention_value

        lowercase__ : List[str] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowercase__ : Any = tax_global_layer_norm

        if split_mlp_wi:
            lowercase__ : Tuple = tax_mlp_wi_0
            lowercase__ : str = tax_mlp_wi_1
        else:
            lowercase__ : List[Any] = tax_mlp_wi

        lowercase__ : str = tax_mlp_wo
        lowercase__ : int = tax_mlp_layer_norm
        lowercase__ : List[str] = flax_model_encoder_layer_block

    # Only for layer 0:
    lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    lowercase__ : Optional[int] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        lowercase__ : str = tax_encoder_global_rel_embedding

    # Assigning
    lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    lowercase__ : Union[str, Any] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''

        # Self-Attention
        lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
        lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
        lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
        lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
        lowercase__ : Any = tax_attention_key
        lowercase__ : List[Any] = tax_attention_out
        lowercase__ : Any = tax_attention_query
        lowercase__ : List[Any] = tax_attention_value

        lowercase__ : List[str] = tax_pre_attention_layer_norm

        lowercase__ : List[Any] = tax_enc_dec_attention_key
        lowercase__ : Optional[Any] = tax_enc_dec_attention_out
        lowercase__ : str = tax_enc_dec_attention_query
        lowercase__ : Union[str, Any] = tax_enc_dec_attention_value

        lowercase__ : Tuple = tax_cross_layer_norm

        if split_mlp_wi:
            lowercase__ : List[str] = tax_mlp_wi_0
            lowercase__ : List[Any] = tax_mlp_wi_1
        else:
            lowercase__ : Tuple = tax_mlp_wi

        lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
        lowercase__ : int = flax_model_decoder_layer_block

    # Decoder Normalization
    lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm

    # Only for layer 0:
    lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    lowercase__ : str = tax_decoder_rel_embedding

    # Token Embeddings
    lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(_lowerCamelCase)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
    parser.add_argument(
        '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
    )
    UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
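# Usage sketch (script filename assumed; checkpoint path is a placeholder):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump
#
# The dumped folder can afterwards be reloaded with the `from_pretrained`
# counterpart of the Flax auto class imported at the top of this script.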
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class snake_case_ ( __A ): __A : Dict = (DDIMParallelScheduler,) __A : int = (("eta", 0.0), ("num_inference_steps", 50)) def __UpperCamelCase ( self : Optional[Any] , **lowercase_ : str ) -> Union[str, Any]: lowercase__ : Dict = { "num_train_timesteps": 10_00, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**lowercase_ ) return config def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[Any] ) -> int: lowercase__ : Any = self.scheduler_classes[0] lowercase__ : Any = self.get_scheduler_config(**lowercase_ ) lowercase__ : Optional[int] = scheduler_class(**lowercase_ ) lowercase__ , lowercase__ : Any = 10, 0.0 lowercase__ : List[str] = self.dummy_model() lowercase__ : Optional[int] = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for t in scheduler.timesteps: lowercase__ : List[Any] = model(lowercase_ , lowercase_ ) lowercase__ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def __UpperCamelCase ( self : str ) -> Dict: for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config(steps_offset=1 ) lowercase__ : List[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def __UpperCamelCase ( self : List[str] ) -> Dict: for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def __UpperCamelCase ( self : str ) -> Tuple: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def __UpperCamelCase ( self : int ) -> int: for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> List[str]: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: self.check_over_configs(thresholding=lowercase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , ) def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: for t in [1, 10, 49]: self.check_over_forward(time_step=lowercase_ ) def __UpperCamelCase ( self : int ) -> List[Any]: for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ ) def __UpperCamelCase ( self : str ) -> Optional[Any]: for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=lowercase_ , eta=lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: 
lowercase__ : Dict = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**lowercase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5 def __UpperCamelCase ( self : Tuple ) -> Dict: lowercase__ : List[Any] = self.scheduler_classes[0] lowercase__ : Optional[int] = self.get_scheduler_config() lowercase__ : str = scheduler_class(**lowercase_ ) lowercase__ , lowercase__ : Dict = 10, 0.0 scheduler.set_timesteps(lowercase_ ) lowercase__ : int = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter lowercase__ : Optional[Any] = self.dummy_sample_deter + 0.1 lowercase__ : int = self.dummy_sample_deter - 0.1 lowercase__ : Any = samplea.shape[0] lowercase__ : Any = torch.stack([samplea, samplea, samplea] , dim=0 ) lowercase__ : List[Any] = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ ) lowercase__ : Any = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowercase__ : List[Any] = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ ) lowercase__ : Union[str, Any] = torch.sum(torch.abs(lowercase_ ) ) lowercase__ : List[str] = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def __UpperCamelCase ( self : List[str] ) -> Dict: lowercase__ : Dict = self.full_loop() lowercase__ : Dict = torch.sum(torch.abs(lowercase_ ) ) lowercase__ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def __UpperCamelCase ( self : List[str] ) -> Tuple: lowercase__ : List[str] = self.full_loop(prediction_type="v_prediction" ) lowercase__ : List[str] = torch.sum(torch.abs(lowercase_ ) ) lowercase__ : str = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: # We specify different beta, so that the first alpha is 0.99 lowercase__ : Optional[Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) lowercase__ : int = torch.sum(torch.abs(lowercase_ ) ) lowercase__ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def __UpperCamelCase ( self : List[Any] ) -> List[Any]: # We specify different beta, so that the first alpha is 0.99 lowercase__ : List[Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) lowercase__ : str = torch.sum(torch.abs(lowercase_ ) ) lowercase__ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 ) < 1E-3
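# A minimal denoising-loop sketch mirroring the `full_loop` helper above, with a
# zero tensor standing in for a real UNet's noise prediction; the constructor
# arguments and the `step` signature follow the diffusers scheduler API used in
# the tests:
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample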
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class snake_case_ ( __A ): __A : Optional[int] = "rwkv" __A : List[str] = {"max_position_embeddings": "context_length"} def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int: lowercase__ : List[str] = vocab_size lowercase__ : str = context_length lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size lowercase__ : List[Any] = layer_norm_epsilon lowercase__ : str = rescale_every lowercase__ : Optional[int] = use_cache lowercase__ : int = bos_token_id lowercase__ : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
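# Minimal usage sketch for the config above (upstream the class is exported as
# RwkvConfig; RwkvModel is assumed to be available next to it):
from transformers import RwkvConfig, RwkvModel

config = RwkvConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
model = RwkvModel(config)  # randomly initialised; context_length defaults to 1024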
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Optional[int]=None): lowercase__ : int = None if token is not None: lowercase__ : Any = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''} lowercase__ : Optional[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' lowercase__ : int = requests.get(_lowerCamelCase , headers=_lowerCamelCase).json() lowercase__ : str = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) lowercase__ : Tuple = math.ceil((result["total_count"] - 100) / 100) for i in range(_lowerCamelCase): lowercase__ : Optional[int] = requests.get(url + f'''&page={i + 2}''' , headers=_lowerCamelCase).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) return job_links except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''') return {} def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any]=None): lowercase__ : Dict = None if token is not None: lowercase__ : Any = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''} lowercase__ : Union[str, Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' lowercase__ : str = requests.get(_lowerCamelCase , headers=_lowerCamelCase).json() lowercase__ : List[str] = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) lowercase__ : str = math.ceil((result["total_count"] - 100) / 100) for i in range(_lowerCamelCase): lowercase__ : Any = requests.get(url + f'''&page={i + 2}''' , headers=_lowerCamelCase).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) return artifacts except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''') return {} def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Dict): lowercase__ : Optional[Any] = None if token is not None: lowercase__ : Tuple = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''} lowercase__ : Any = requests.get(_lowerCamelCase , headers=_lowerCamelCase , allow_redirects=_lowerCamelCase) lowercase__ : List[Any] = result.headers["Location"] lowercase__ : str = requests.get(_lowerCamelCase , allow_redirects=_lowerCamelCase) lowercase__ : str = os.path.join(_lowerCamelCase , f'''{artifact_name}.zip''') with open(_lowerCamelCase , "wb") as fp: fp.write(response.content) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Dict=None): lowercase__ : str = [] lowercase__ : str = [] lowercase__ : List[str] = None with zipfile.ZipFile(_lowerCamelCase) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCamelCase): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(_lowerCamelCase) as f: for line in f: lowercase__ : Any = line.decode("UTF-8").strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs lowercase__ : List[str] = line[: line.index(": ")] lowercase__ : Tuple = line[line.index(": ") + len(": ") :] errors.append([error_line, error]) except Exception: # skip un-related lines pass elif filename == 
"summary_short.txt" and line.startswith("FAILED "): # `test` is the test method that failed lowercase__ : Union[str, Any] = line[len("FAILED ") :] failed_tests.append(_lowerCamelCase) elif filename == "job_name.txt": lowercase__ : List[str] = line if len(_lowerCamelCase) != len(_lowerCamelCase): raise ValueError( f'''`errors` and `failed_tests` should have the same number of elements. Got {len(_lowerCamelCase)} for `errors` ''' f'''and {len(_lowerCamelCase)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' " problem.") lowercase__ : Any = None if job_name and job_links: lowercase__ : Optional[Any] = job_links.get(_lowerCamelCase , _lowerCamelCase) # A list with elements of the form (line of error, error, failed test) lowercase__ : Any = [x + [y] + [job_link] for x, y in zip(_lowerCamelCase , _lowerCamelCase)] return result def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]=None): lowercase__ : Union[str, Any] = [] lowercase__ : Optional[int] = [os.path.join(_lowerCamelCase , _lowerCamelCase) for p in os.listdir(_lowerCamelCase) if p.endswith(".zip")] for p in paths: errors.extend(get_errors_from_single_artifact(_lowerCamelCase , job_links=_lowerCamelCase)) return errors def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any=None): lowercase__ : List[str] = Counter() counter.update([x[1] for x in logs]) lowercase__ : Tuple = counter.most_common() lowercase__ : Union[str, Any] = {} for error, count in counts: if error_filter is None or error not in error_filter: lowercase__ : Union[str, Any] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} lowercase__ : Dict = dict(sorted(r.items() , key=lambda _lowerCamelCase: item[1]["count"] , reverse=_lowerCamelCase)) return r def lowercase_ ( _lowerCamelCase : int): lowercase__ : List[Any] = test.split("::")[0] if test.startswith("tests/models/"): lowercase__ : Optional[Any] = test.split("/")[2] else: lowercase__ : List[Any] = None return test def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Any=None): lowercase__ : Dict = [(x[0], x[1], get_model(x[2])) for x in logs] lowercase__ : Dict = [x for x in logs if x[2] is not None] lowercase__ : Optional[int] = {x[2] for x in logs} lowercase__ : Dict = {} for test in tests: lowercase__ : int = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test]) lowercase__ : str = counter.most_common() lowercase__ : Dict = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} lowercase__ : Any = sum(error_counts.values()) if n_errors > 0: lowercase__ : str = {"count": n_errors, "errors": error_counts} lowercase__ : Dict = dict(sorted(r.items() , key=lambda _lowerCamelCase: item[1]["count"] , reverse=_lowerCamelCase)) return r def lowercase_ ( _lowerCamelCase : Union[str, Any]): lowercase__ : str = "| no. | error | status |" lowercase__ : List[str] = "|-:|:-|:-|" lowercase__ : Union[str, Any] = [header, sep] for error in reduced_by_error: lowercase__ : Optional[int] = reduced_by_error[error]["count"] lowercase__ : Union[str, Any] = f'''| {count} | {error[:100]} | |''' lines.append(_lowerCamelCase) return "\n".join(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = "| model | no. 
of errors | major error | count |" lowercase__ : int = "|-:|-:|-:|-:|" lowercase__ : str = [header, sep] for model in reduced_by_model: lowercase__ : List[str] = reduced_by_model[model]["count"] lowercase__ , lowercase__ : Dict = list(reduced_by_model[model]["errors"].items())[0] lowercase__ : str = f'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(_lowerCamelCase) return "\n".join(_lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') UpperCamelCase = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) UpperCamelCase = get_job_links(args.workflow_run_id, token=args.token) UpperCamelCase = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: UpperCamelCase = k.find(''' / ''') UpperCamelCase = k[index + len(''' / ''') :] UpperCamelCase = v with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) UpperCamelCase = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error UpperCamelCase = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors UpperCamelCase = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) UpperCamelCase = reduce_by_error(errors) UpperCamelCase = reduce_by_model(errors) UpperCamelCase = make_github_table(reduced_by_error) UpperCamelCase = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa) with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa)
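# Usage sketch (script filename assumed; the run id and token are placeholders,
# and the token needs actions:read permission as the --token help text says):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_reports \
#       --token $GITHUB_TOKEN
#
# The script downloads every artifact of the run, extracts the failure lines from
# the test reports, and writes errors.json plus the two GitHub-flavoured tables
# (per error and per model) into --output_dir.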
class snake_case_ : def __init__( self : int ) -> Optional[int]: lowercase__ : Optional[int] = 0 lowercase__ : List[str] = 0 lowercase__ : Any = {} def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]: if vertex not in self.adjacency: lowercase__ : List[Any] = {} self.num_vertices += 1 def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]: self.add_vertex(lowercase_ ) self.add_vertex(lowercase_ ) if head == tail: return lowercase__ : int = weight lowercase__ : Any = weight def __UpperCamelCase ( self : Dict ) -> Optional[int]: lowercase__ : List[Any] = self.get_edges() for edge in edges: lowercase__ , lowercase__ , lowercase__ : int = edge edges.remove((tail, head, weight) ) for i in range(len(lowercase_ ) ): lowercase__ : Tuple = list(edges[i] ) edges.sort(key=lambda lowercase_ : e[2] ) for i in range(len(lowercase_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowercase__ : int = edges[i][2] + 1 for edge in edges: lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge lowercase__ : Union[str, Any] = weight lowercase__ : Dict = weight def __str__( self : str ) -> Any: lowercase__ : str = "" for tail in self.adjacency: for head in self.adjacency[tail]: lowercase__ : Optional[Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Any = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __UpperCamelCase ( self : List[str] ) -> Dict: return self.adjacency.keys() @staticmethod def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]: lowercase__ : Any = Graph() if vertices is None: lowercase__ : str = [] if edges is None: lowercase__ : List[Any] = [] for vertex in vertices: g.add_vertex(lowercase_ ) for edge in edges: g.add_edge(*lowercase_ ) return g class snake_case_ : def __init__( self : int ) -> List[str]: lowercase__ : Dict = {} lowercase__ : Tuple = {} def __len__( self : Union[str, Any] ) -> Union[str, Any]: return len(self.parent ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple: if item in self.parent: return self.find(lowercase_ ) lowercase__ : Union[str, Any] = item lowercase__ : int = 0 return item def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any: if item not in self.parent: return self.make_set(lowercase_ ) if item != self.parent[item]: lowercase__ : Union[str, Any] = self.find(self.parent[item] ) return self.parent[item] def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]: lowercase__ : Dict = self.find(lowercase_ ) lowercase__ : Optional[int] = self.find(lowercase_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowercase__ : Dict = roota return roota if self.rank[roota] < self.rank[roota]: lowercase__ : int = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowercase__ : Tuple = roota return roota return None @staticmethod def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]: lowercase__ : List[Any] = graph.num_vertices lowercase__ : Optional[Any] = Graph.UnionFind() lowercase__ : int = [] while num_components > 1: lowercase__ : List[Any] = {} for vertex in graph.get_vertices(): lowercase__ : Any = -1 lowercase__ : List[str] = graph.get_edges() for edge in edges: lowercase__ 
, lowercase__ , lowercase__ : str = edge edges.remove((tail, head, weight) ) for edge in edges: lowercase__ , lowercase__ , lowercase__ : List[str] = edge lowercase__ : List[str] = union_find.find(lowercase_ ) lowercase__ : Union[str, Any] = union_find.find(lowercase_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowercase__ : Dict = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex] if union_find.find(lowercase_ ) != union_find.find(lowercase_ ): union_find.union(lowercase_ , lowercase_ ) mst_edges.append(cheap_edge[vertex] ) lowercase__ : Optional[Any] = num_components - 1 lowercase__ : List[Any] = Graph.build(edges=lowercase_ ) return mst
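# Example run, as a sketch: the two classes above are shown with obfuscated
# names, but the internal references (`Graph.build`, `Graph.UnionFind`) suggest
# the graph class is called `Graph`; `boruvka` below is a hypothetical name for
# the final static MST routine:
g = Graph.build(
    vertices=[1, 2, 3, 4],
    edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
)
mst = Graph.boruvka(g)  # hypothetical name for the static method above
print(mst)  # the minimum spanning tree in "head -> tail == weight" form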
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=False): lowercase__ : int = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''')) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''')) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''')) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''')) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''')) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''')) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''')) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''')) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''')) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''')) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ]) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ]) # if just the base model, we should remove "deit" from all keys that start with "deit" lowercase__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ]) return rename_keys def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False): for i in range(config.num_hidden_layers): if base_model: lowercase__ : Optional[int] = "" else: lowercase__ : Union[str, Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Dict = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''') lowercase__ : str = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase__ : Any = in_proj_bias[: config.hidden_size] lowercase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : Optional[int] = in_proj_weight[ -config.hidden_size :, : ] lowercase__ : Dict = in_proj_bias[-config.hidden_size :] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]): lowercase__ : Any = dct.pop(_lowerCamelCase) lowercase__ : List[Any] = val def lowercase_ ( ): lowercase__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple): lowercase__ : Optional[Any] = DeiTConfig() # all deit models have fine-tuned heads lowercase__ : List[str] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowercase__ : Optional[int] = 1000 lowercase__ : Dict = "huggingface/label-files" lowercase__ : Dict = "imagenet-1k-id2label.json" lowercase__ : Any = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset") , "r")) lowercase__ : List[Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : List[str] = idalabel lowercase__ : Any = {v: k for k, v in idalabel.items()} lowercase__ : List[str] = int(deit_name[-6:-4]) lowercase__ : Dict = int(deit_name[-3:]) # size of the architecture if deit_name[9:].startswith("tiny"): lowercase__ : Union[str, Any] = 192 lowercase__ : Dict = 768 lowercase__ : Any = 12 lowercase__ : List[str] = 3 elif deit_name[9:].startswith("small"): lowercase__ : Tuple = 384 lowercase__ : List[Any] = 1536 lowercase__ : List[Any] = 12 lowercase__ : str = 6 if deit_name[9:].startswith("base"): pass elif deit_name[4:].startswith("large"): lowercase__ : Tuple = 1024 lowercase__ : str = 4096 lowercase__ : List[Any] = 24 lowercase__ : Optional[Any] = 16 # load original model from timm lowercase__ : Any = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase__ : Optional[int] = timm_model.state_dict() lowercase__ : List[str] = create_rename_keys(_lowerCamelCase , _lowerCamelCase) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Union[str, Any] = DeiTForImageClassificationWithTeacher(_lowerCamelCase).eval() model.load_state_dict(_lowerCamelCase) # Check outputs on an image, prepared by DeiTImageProcessor lowercase__ : str = int( (256 / 224) * config.image_size) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowercase__ : Union[str, Any] = DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size) lowercase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt") lowercase__ : Optional[Any] = encoding["pixel_values"] lowercase__ : int = model(_lowerCamelCase) lowercase__ : List[str] = timm_model(_lowerCamelCase) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--deit_name''', default='''vit_deit_base_distilled_patch16_224''', type=str, help='''Name of the DeiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
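# Usage sketch (script filename assumed; the model name matches the argparse
# default above):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled
#
# The conversion verifies the HF logits against the original timm model before
# saving, so a shape or value mismatch fails the assertions instead of silently
# producing a broken checkpoint.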
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( _lowerCamelCase : str): lowercase__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: lowercase__ : str = 1024 lowercase__ : List[str] = 4096 lowercase__ : List[Any] = 24 lowercase__ : Dict = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : Any = [256, 512, 1024, 1024] lowercase__ : Optional[int] = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Union[str, Any] = True lowercase__ : Tuple = 150 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : str = "ade20k-id2label.json" lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r")) lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Tuple): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder") if "pretrained.model" in name: lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings") if "patch_embed" in name: lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings") if "pos_embed" in name: lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings") if "attn.proj" in name: lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense") if "proj" in name and "project" not in name: lowercase__ : int = name.replace("proj" , "projection") if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layer") if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense") if "mlp.fc2" in name: lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense") if "norm1" in name: lowercase__ : List[str] = name.replace("norm1" , "layernorm_before") if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "layernorm_after") if "scratch.output_conv" in name: lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head") if "scratch" in name: lowercase__ : str = name.replace("scratch" , "neck") if "layer1_rn" in name: lowercase__ : int = name.replace("layer1_rn" , "convs.0") if "layer2_rn" in name: lowercase__ : int = name.replace("layer2_rn" , "convs.1") if "layer3_rn" in name: lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2") if "layer4_rn" in name: lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3") if "refinenet" in name: lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''') if "out_conv" in 
name: lowercase__ : str = name.replace("out_conv" , "projection") if "resConfUnit1" in name: lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1") if "resConfUnit2" in name: lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2") if "conv1" in name: lowercase__ : List[Any] = name.replace("conv1" , "convolution1") if "conv2" in name: lowercase__ : Tuple = name.replace("conv2" , "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: lowercase__ : Any = name.replace("pretrained" , "dpt") if "bn" in name: lowercase__ : str = name.replace("bn" , "batch_norm") if "head" in name: lowercase__ : Optional[Any] = name.replace("head" , "head.head") if "encoder.norm" in name: lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm") if "auxlayer" in name: lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head") return name def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''') lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :] lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size] lowercase__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ 
-config.hidden_size :, : ] lowercase__ : int = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw) return im @torch.no_grad() def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict): lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase) # load original state_dict from URL lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu") # remove certain keys remove_ignore_keys_(_lowerCamelCase) # rename keys for key in state_dict.copy().keys(): lowercase__ : List[str] = state_dict.pop(_lowerCamelCase) lowercase__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , _lowerCamelCase) # load HuggingFace model lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase) model.load_state_dict(_lowerCamelCase) model.eval() # Check outputs on an image lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384 lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase) lowercase__ : List[str] = prepare_img() lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt") # forward pass lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth # Assert logits lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(_lowerCamelCase) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase) ) Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase) print(f'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_lowerCamelCase) print(f'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_lowerCamelCase) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
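# Usage sketch (script filename assumed; the flag values mirror the argparse
# defaults above):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large
#
# A checkpoint URL containing "ade" switches the target from DPTForDepthEstimation
# to DPTForSemanticSegmentation, exactly as the config-building helper above
# branches.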
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Union[str, Any] ) -> str: lowercase__ : Dict = tempfile.mkdtemp() # fmt: off lowercase__ : str = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase__ : List[str] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase__ : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase__ : Union[str, Any] = {"unk_token": "<unk>"} lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) lowercase__ : Tuple = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , lowercase_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : int , **lowercase_ : List[str] ) -> List[Any]: return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> int: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : Optional[int] , **lowercase_ : Tuple ) -> List[Any]: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : Dict ) -> Optional[Any]: lowercase__ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowercase__ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Optional[int] ) -> Any: lowercase__ : str = self.get_tokenizer() lowercase__ : List[Any] = self.get_rust_tokenizer() lowercase__ : str = self.get_image_processor() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ ) lowercase__ : List[Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , 
tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowercase_ ) self.assertIsInstance(processor_fast.tokenizer , lowercase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowercase_ ) self.assertIsInstance(processor_fast.image_processor , lowercase_ ) def __UpperCamelCase ( self : Dict ) -> Dict: lowercase__ : int = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ : Tuple = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) lowercase__ : str = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __UpperCamelCase ( self : str ) -> Optional[int]: lowercase__ : str = self.get_image_processor() lowercase__ : int = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) lowercase__ : int = self.prepare_image_inputs() lowercase__ : Dict = image_processor(lowercase_ , return_tensors="np" ) lowercase__ : Optional[Any] = processor(images=lowercase_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCamelCase ( self : Any ) -> Union[str, Any]: lowercase__ : Any = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) lowercase__ : int = "lower newer" lowercase__ : Tuple = processor(text=lowercase_ ) lowercase__ : Optional[Any] = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowercase__ : List[Any] = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) lowercase__ : Any = "lower newer" lowercase__ : int = self.prepare_image_inputs() lowercase__ : str = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def __UpperCamelCase ( self : str ) -> str: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : int = self.get_tokenizer() lowercase__ : Optional[int] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) lowercase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : List[str] = processor.batch_decode(lowercase_ ) lowercase__ : Dict = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> str: lowercase__ : Union[str, Any] = 
self.get_image_processor() lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) lowercase__ : Optional[Any] = "lower newer" lowercase__ : str = self.prepare_image_inputs() lowercase__ : List[str] = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
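# The user-facing behaviour these tests pin down, in one short sketch:
# CLIPProcessor routes `text=` to the tokenizer and `images=` to the image
# processor and merges both outputs. The checkpoint id here is an assumption of
# the example, not something the tests use:
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["lower newer"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']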
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator d in [numerator, digit] whose unit fraction 1/d
    produces the longest chain of distinct long-division remainders, i.e. the
    longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
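# Worked example with the restored names above: 1/7 = 0.(142857) has a 6-digit
# cycle, the longest of any 1/d with d < 10, and 983 is the well-known answer
# for denominators below 1000:
#
#   >>> solution(1, 10)
#   7
#   >>> solution()
#   983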
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class snake_case_ ( __A ,__A ): __A : str = "nat" __A : Optional[int] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , lowercase_ : int=4 , lowercase_ : Tuple=3 , lowercase_ : Dict=64 , lowercase_ : str=[3, 4, 6, 5] , lowercase_ : List[str]=[2, 4, 8, 16] , lowercase_ : int=7 , lowercase_ : int=3.0 , lowercase_ : Optional[int]=True , lowercase_ : int=0.0 , lowercase_ : int=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[str]="gelu" , lowercase_ : int=0.02 , lowercase_ : Optional[Any]=1E-5 , lowercase_ : Optional[Any]=0.0 , lowercase_ : List[Any]=None , lowercase_ : Dict=None , **lowercase_ : Any , ) -> Tuple: super().__init__(**lowercase_ ) lowercase__ : Dict = patch_size lowercase__ : Tuple = num_channels lowercase__ : List[Any] = embed_dim lowercase__ : Dict = depths lowercase__ : int = len(lowercase_ ) lowercase__ : Optional[Any] = num_heads lowercase__ : Dict = kernel_size lowercase__ : Optional[Any] = mlp_ratio lowercase__ : Union[str, Any] = qkv_bias lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Optional[int] = drop_path_rate lowercase__ : str = hidden_act lowercase__ : str = layer_norm_eps lowercase__ : Any = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase__ : int = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) lowercase__ : int = layer_scale_init_value lowercase__ : int = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )] lowercase__ , lowercase__ : Tuple = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
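# Minimal sketch (upstream the class is exported as NatConfig); the derived
# attributes follow the constructor logic above:
from transformers import NatConfig

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
print(config.num_layers)   # 4, i.e. len(depths)
print(config.hidden_size)  # 64 * 2 ** 3 == 512, the channel dim after the last stage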
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
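A short usage sketch (not part of the original module) exercising the Matrix API above:

# Example: basic Matrix operations; the printed values follow the method
# definitions above.
matrix = Matrix([[1, 2], [3, 4]])
print(matrix.order)            # (2, 2)
print(matrix.determinant())    # 1*4 - 2*3 = -2
print(matrix.is_invertable())  # True, since the determinant is non-zero

identity = matrix.identity()
print(matrix * identity == matrix)  # True, multiplying by I is a no-op

print((matrix + Matrix([[1, 0], [0, 1]])).rows)  # [[2, 2], [3, 5]]

# Caveat: scalar multiplication truncates each element to int (see __mul__),
# so inverse() loses precision for matrices with a non-unit determinant.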
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
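An illustrative sketch (not part of the module above) of how these decorators are typically combined in a test file; it assumes the module is importable as `accelerate.test_utils`:

# Minimal sketch: a test that only runs when RUN_SLOW=yes is set and a GPU exists.
import unittest

import torch

from accelerate.test_utils import require_cuda, slow


class ExampleAccelerateTests(unittest.TestCase):
    @slow          # skipped unless RUN_SLOW=yes is in the environment
    @require_cuda  # skipped automatically on machines without a GPU
    def test_gpu_roundtrip(self):
        tensor = torch.ones(4, device="cuda")
        self.assertTrue(torch.equal(tensor.cpu(), torch.ones(4)))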
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
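A worked example (not in the original script): 2x2 pooling windows with stride 2 over a 4x4 array, using the functions defined above.

import numpy as np

arr = np.arange(1, 17).reshape(4, 4)  # [[1..4], [5..8], [9..12], [13..16]]
print(maxpooling(arr, size=2, stride=2))  # [[ 6.  8.]  [14. 16.]]
print(avgpooling(arr, size=2, stride=2))  # [[ 3.  5.]  [11. 13.]] (averages truncated to int)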
from ..utils import DummyObject, requires_backends


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
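A standalone sketch (not part of the test file above) of a single forward pass with random inputs shaped like the tester's; it assumes transformers with torch support is installed.

# Minimal sketch: one DecisionTransformerModel forward pass on CPU.
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6)
model = DecisionTransformerModel(config).eval()

batch, seq = 2, 5
out = model(
    states=torch.randn(batch, seq, config.state_dim),
    actions=torch.randn(batch, seq, config.act_dim),
    rewards=torch.randn(batch, seq, 1),
    returns_to_go=torch.randn(batch, seq, 1),
    timesteps=torch.arange(seq).repeat(batch, 1),
    attention_mask=torch.ones(batch, seq, dtype=torch.long),
)
print(out.action_preds.shape)  # torch.Size([2, 5, 6])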
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
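A usage sketch (not part of the configuration module): instantiating the config with a custom mask ratio and building the corresponding model from it, assuming transformers is installed.

from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig(mask_ratio=0.5, norm_pix_loss=True)
model = ViTMAEModel(config)
print(config.mask_ratio, config.hidden_size)  # 0.5 768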
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
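Quick checks (not in the original file) for is_balanced, covering the three failure modes:

assert is_balanced("([]{})") is True       # properly nested
assert is_balanced("([)]") is False        # interleaved: pop mismatch
assert is_balanced("(((") is False         # unclosed openers remain on the stack
assert is_balanced("no brackets") is True  # non-bracket characters are ignored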
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
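A worked example (not in the original file): the inverse of 3 modulo 11 is 4, since 3 * 4 = 12 which is congruent to 1 (mod 11).

assert find_mod_inverse(3, 11) == 4
assert (3 * find_mod_inverse(3, 11)) % 11 == 1

# gcd(a, m) must be 1 for an inverse to exist:
# find_mod_inverse(4, 8) raises ValueError because gcd(4, 8) == 4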
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
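Sanity checks (not in the original script) for the building blocks above:

import numpy as np

assert sigmoid_function(0) == 0.5    # the sigmoid is centered at 0.5
assert sigmoid_function(35) > 0.999  # and saturates towards 1 for large inputs

# a near-perfect prediction has near-zero cost; a confident wrong one is expensive
good = cost_function(h=np.array([0.99]), y=np.array([1]))
bad = cost_function(h=np.array([0.01]), y=np.array([1]))
assert good < bad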
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
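A hedged follow-up sketch (not in the script itself): querying the saved index with a DPR question encoder. The output_dir value is an assumption mirroring the defaults above.

import os

from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

output_dir = "test_run/dummy-kb"  # assumption: the default output_dir used above
dataset = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))
dataset.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

question = "What does Moses' rod turn into ?"
question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt")).pooler_output[0].detach().numpy()
scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=2)
print(list(zip(scores, passages["title"])))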
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
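Example runs (not part of the original script); both dates fell on a Saturday, which the internal datetime cross-check also confirms:

print(zeller("01-01-2000"))  # Your date 01-01-2000, is a Saturday!
print(zeller("03/14/2015"))  # Your date 03/14/2015, is a Saturday!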