Dataset schema (each row below repeats these five fields, in order):

- code: string, length 87 to 55.2k
- code_codestyle: int64, range 0 to 349
- style_context: string, length 135 to 49.1k
- style_context_codestyle: int64, range 0 to 349
- label: int64, range 0 to 1
"""Lazy-import structure for the TrOCR model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
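The sample above is the lazy `__init__` pattern used across `transformers`: the package registers symbol names up front and defers the torch-heavy modeling module until one of its symbols is first touched. A minimal sketch of the effect, assuming a `transformers` build that ships TrOCR:

```python
# Resolving the config is cheap: _LazyModule pulls it from
# configuration_trocr without importing the torch-backed modeling file.
from transformers import TrOCRConfig

config = TrOCRConfig()
print(config.model_type)  # "trocr"

# This first access triggers the deferred import of modeling_trocr
# (and hence of torch, which must be installed for it to succeed).
from transformers import TrOCRForCausalLM
```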
code_codestyle: 28
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of 
cross-attention to the state dict UpperCamelCase = in_proj_weight_cross_attn[:256, :] UpperCamelCase = in_proj_bias_cross_attn[:256] UpperCamelCase = in_proj_weight_cross_attn[256:512, :] UpperCamelCase = in_proj_bias_cross_attn[256:512] UpperCamelCase = in_proj_weight_cross_attn[-256:, :] UpperCamelCase = in_proj_bias_cross_attn[-256:] def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase = max(A__ , A__ ) UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000 UpperCamelCase = target_max_size / current_max_size UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = F.to_tensor(A__ ) UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" logger.info('Converting model...' ) # load original state dict UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) UpperCamelCase = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val # create HuggingFace model and load state dict UpperCamelCase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase = 15 UpperCamelCase = 2 UpperCamelCase = {0: 'table', 1: 'table rotated'} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} else: UpperCamelCase = 125 UpperCamelCase = 6 UpperCamelCase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 ) UpperCamelCase = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ ) UpperCamelCase = Image.open(A__ ).convert('RGB' ) UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) UpperCamelCase = model(A__ ) if "detection" in checkpoint_url: UpperCamelCase = (1, 15, 3) UpperCamelCase = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCamelCase = (1, 125, 7) UpperCamelCase = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) 
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) UpperCamelCase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : int = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 28
label: 1
"""Archimedes' principle: the buoyant force on a submerged object equals
fluid density * gravity * displaced volume."""

g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force in newtons for a displaced volume in cubic metres."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
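A worked example of the formula, with an assumed density value for fresh water:

```python
# Fresh water is roughly 997 kg/m^3; 997 * 9.80665 * 0.5 ≈ 4888.6 N.
print(archimedes_principle(fluid_density=997, volume=0.5))
```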
code_codestyle: 28
"""Video classification pipeline (decord backend)."""
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
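This pipeline is registered as the `video-classification` task. A usage sketch, assuming `decord`, `torch`, and a video checkpoint are available; the clip URL is a placeholder:

```python
from transformers import pipeline

classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# preprocess() samples num_frames frames, frame_sampling_rate apart, from the clip.
predictions = classifier("https://example.com/clip.mp4", top_k=3, frame_sampling_rate=4)
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f}")
```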
style_context_codestyle: 28
label: 1
"""Binomial distribution: the probability of k successes in n independent
trials, each succeeding with probability p."""
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
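A quick check of the arithmetic: for 2 successes in 4 trials at p = 0.75, the coefficient is 4! / (2! * 2!) = 6 and the probability term is 0.75^2 * 0.25^2 = 0.03515625, so the result is 0.2109375.

```python
assert binomial_distribution(2, 4, 0.75) == 6 * 0.75**2 * 0.25**2  # 0.2109375
```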
code_codestyle: 28
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCamelCase : Optional[int] = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _lowerCamelCase : Union[str, Any] = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _lowerCamelCase : Optional[Any] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _lowerCamelCase : List[Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 
3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _lowerCamelCase : List[str] = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]: """simple docstring""" return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(A__ ) UpperCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __lowerCamelCase ( ) -> str: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __lowerCamelCase ( ) -> List[str]: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) ) UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' ) with open(A__ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ ) UpperCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
style_context_codestyle: 28
label: 1
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLUE scores as follows: # "pair": [fairseq, transformers] UpperCamelCase = { 'wmt16-en-de-dist-12-1': [28.3, 27.52], 'wmt16-en-de-dist-6-1': [27.4, 27.11], 'wmt16-en-de-12-1': [26.9, 25.75], } UpperCamelCase = F"""{src_lang}-{tgt_lang}""" UpperCamelCase = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"allenai/{model_name}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=A__ , exist_ok=A__ ) UpperCamelCase = os.path.join(A__ , 'README.md' ) print(F"""Generating {path}""" ) with open(A__ , 'w' , encoding='utf-8' ) as f: f.write(A__ ) # make sure we are under the root of the project _lowerCamelCase : str = Path(__file__).resolve().parent.parent.parent _lowerCamelCase : Tuple = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: _lowerCamelCase : Any = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
code_codestyle: 28
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ): """simple docstring""" UpperCamelCase = sigma_max # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) UpperCamelCase = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.floataa , device=UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCamelCase = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device ) UpperCamelCase = sigma + gamma * sigma UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_hat + sigma_hat * model_output UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_prev + sigma_prev * model_output UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A 
( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ): """simple docstring""" raise NotImplementedError()
style_context_codestyle: 28
label: 1
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : Optional[Any] = "Hello world! cécé herlolip" def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = FairseqRobertaModel.from_pretrained(A__ ) roberta.eval() # disable dropout UpperCamelCase = roberta.model.encoder.sentence_encoder UpperCamelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , A__ ) UpperCamelCase = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ ) model.eval() # Now let's copy all the weights. # Embeddings UpperCamelCase = roberta_sent_encoder.embed_tokens.weight UpperCamelCase = roberta_sent_encoder.embed_positions.weight UpperCamelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
UpperCamelCase = roberta_sent_encoder.layer_norm.weight UpperCamelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer UpperCamelCase = model.roberta.encoder.layer[i] UpperCamelCase = roberta_sent_encoder.layers[i] UpperCamelCase = layer.attention UpperCamelCase = roberta_layer.self_attn_layer_norm.weight UpperCamelCase = roberta_layer.self_attn_layer_norm.bias # self attention UpperCamelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) UpperCamelCase = roberta_layer.self_attn.q_proj.weight UpperCamelCase = roberta_layer.self_attn.q_proj.bias UpperCamelCase = roberta_layer.self_attn.k_proj.weight UpperCamelCase = roberta_layer.self_attn.k_proj.bias UpperCamelCase = roberta_layer.self_attn.v_proj.weight UpperCamelCase = roberta_layer.self_attn.v_proj.bias # self-attention output UpperCamelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape UpperCamelCase = roberta_layer.self_attn.out_proj.weight UpperCamelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm UpperCamelCase = roberta_layer.final_layer_norm.weight UpperCamelCase = roberta_layer.final_layer_norm.bias # intermediate UpperCamelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape UpperCamelCase = roberta_layer.fca.weight UpperCamelCase = roberta_layer.fca.bias # output UpperCamelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape UpperCamelCase = roberta_layer.fca.weight UpperCamelCase = roberta_layer.fca.bias # end of layer if classification_head: UpperCamelCase = roberta.model.classification_heads['mnli'].dense.weight UpperCamelCase = roberta.model.classification_heads['mnli'].dense.bias UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head UpperCamelCase = roberta.model.encoder.lm_head.dense.weight UpperCamelCase = roberta.model.encoder.lm_head.dense.bias UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.weight UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.bias UpperCamelCase = roberta.model.encoder.lm_head.weight UpperCamelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. UpperCamelCase = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1 UpperCamelCase = model(A__ )[0] if classification_head: UpperCamelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(A__ ) ) else: UpperCamelCase = roberta.model(A__ )[0] print(our_output.shape , their_output.shape ) UpperCamelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 UpperCamelCase = torch.allclose(A__ , A__ , atol=1e-3 ) print('Do both models output the same tensors?' 
, '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) _lowerCamelCase : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
code_codestyle: 28
"""Lazy-import structure for the I-BERT model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 28
label: 1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: _lowerCamelCase : Tuple = None _lowerCamelCase : int = logging.get_logger(__name__) _lowerCamelCase : Any = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowerCamelCase : Union[str, Any] = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } _lowerCamelCase : str = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off _lowerCamelCase : List[str] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] _SCREAMING_SNAKE_CASE = MBartTokenizer _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] def __init__( self : Optional[int] , UpperCamelCase__ : int=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : Tuple="<s>" , UpperCamelCase__ : Optional[Any]="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : Optional[Any]="<mask>" , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( vocab_file=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) UpperCamelCase = { lang_code: self.convert_tokens_to_ids(UpperCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCamelCase = src_lang if src_lang is not None else 'en_XX' UpperCamelCase = self.convert_tokens_to_ids(self._src_lang ) UpperCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self : str ): """simple docstring""" return self._src_lang @src_lang.setter def A ( self : Optional[Any] , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : int ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) UpperCamelCase = src_lang UpperCamelCase = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) UpperCamelCase = self.convert_tokens_to_ids(UpperCamelCase__ ) UpperCamelCase = tgt_lang_id return inputs def A ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : int , ): """simple docstring""" UpperCamelCase = src_lang UpperCamelCase = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def A ( self : int ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def A ( self : Union[str, Any] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : Tuple , UpperCamelCase__ : Any ): """simple docstring""" UpperCamelCase = self.convert_tokens_to_ids(UpperCamelCase__ ) UpperCamelCase = [] UpperCamelCase = [self.eos_token_id, self.cur_lang_code] UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : List[str] , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = self.convert_tokens_to_ids(UpperCamelCase__ ) UpperCamelCase = [] UpperCamelCase = [self.eos_token_id, self.cur_lang_code] UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) 
UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(UpperCamelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCamelCase = os.path.join( UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
code_codestyle: 28
def solution(max_perimeter: int = 10**9) -> int:
    """Sum all perimeters generated by the Pell-like recurrence below that
    do not exceed max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Each solution of the underlying Pell-like equation is derived
        # from the previous one.
        prev_value += 2 * value
        value += prev_value
        # The recurrence alternates between two families of solutions.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
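Tracing the first iterations, the loop emits perimeters 16, 50, 196, 722, ..., which are the perimeters of the almost-equilateral Heronian triangles (5, 5, 6), (17, 17, 16), (65, 65, 66), (241, 241, 240), ... of Project Euler problem 94. A small bound gives a cheap check:

```python
# 16 + 50 + 196 + 722 = 984; the next perimeter, 2704, exceeds the bound.
assert solution(max_perimeter=1_000) == 984
```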
style_context_codestyle: 28
label: 1
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""] _SCREAMING_SNAKE_CASE = """BlipImageProcessor""" _SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" UpperCamelCase = False super().__init__(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = self.image_processor def __call__( self : Union[str, Any] , UpperCamelCase__ : ImageInput = None , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : int , ): """simple docstring""" if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: UpperCamelCase = self.tokenizer UpperCamelCase = self.tokenizer( text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , ) return text_encoding # add pixel_values UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) if text is not None: UpperCamelCase = self.tokenizer( text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , ) else: UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(UpperCamelCase__ ) return encoding_image_processor def A ( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Any ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def A ( self : Union[str, Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ): """simple docstring""" return 
self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @property def A ( self : str ): """simple docstring""" UpperCamelCase = self.tokenizer.model_input_names UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
code_codestyle: 28
import math


class Graph:
    def __init__(self, n: int = 0):  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        # Adjacency matrix of edge weights; math.inf marks "no edge".
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the minimum distance found so far from i to j.
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int) -> float:
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
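The triple loop relaxes every (i, j) pair through each intermediate node k, so a run costs O(n^3) time over an O(n^2) distance matrix. Continuing the demo in the `__main__` block, the two queries trace out as follows (a check I added, not part of the sample):

```python
# Shortest 1 -> 4 is 1 -> 3 -> 4 = 5 + 6 = 11;
# shortest 0 -> 3 is 0 -> 2 -> 3 = 9 + 7 = 16.
assert graph.show_min(1, 4) == 11
assert graph.show_min(0, 3) == 16
```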
style_context_codestyle: 28
label: 1
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in s where pattern begins (naive O(len(s) * len(pattern)) scan)."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
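For the demo string the scan finds matches at [4, 10, 18]. The occurrences here do not overlap, so a cross-check against the standard library's non-overlapping `re.finditer` agrees:

```python
import re

s, pat = "ABAAABCDBBABCDDEBCABC", "ABC"
assert naive_pattern_search(s, pat) == [m.start() for m in re.finditer(re.escape(pat), s)]  # [4, 10, 18]
```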
code_codestyle: 28
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
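A minimal sketch of the core API this `__init__` re-exports, using a throwaway model and dataset; `prepare()` and `accelerator.backward()` are the two calls that let the same loop run unchanged on CPU, a single GPU, or a distributed launch:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 8), torch.randint(0, 2, (64,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)

# prepare() moves and wraps everything for the current launch configuration.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)
    accelerator.backward(loss)  # used in place of loss.backward()
    optimizer.step()
```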
style_context_codestyle: 28
label: 1
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """A split was recorded that was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The recorded split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, its sha256 checksum in 1 MiB chunks."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    """True if the dataset fits under config.IN_MEMORY_MAX_SIZE (and both values are set)."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase : List[Any] = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import List import numpy as np def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = {key: len(A__ ) for key, value in gen_kwargs.items() if isinstance(A__ , A__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) UpperCamelCase = max(lists_lengths.values() , default=0 ) return max(1 , A__ ) def __lowerCamelCase ( A__ , A__ ) -> List[range]: """simple docstring""" UpperCamelCase = [] for group_idx in range(A__ ): UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 UpperCamelCase = range(A__ , start + num_shards_to_add ) shards_indices_per_group.append(A__ ) return shards_indices_per_group def __lowerCamelCase ( A__ , A__ ) -> List[dict]: """simple docstring""" UpperCamelCase = _number_of_shards_in_gen_kwargs(A__ ) if num_shards == 1: return [dict(A__ )] else: UpperCamelCase = _distribute_shards(num_shards=A__ , max_num_jobs=A__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A__ , A__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A__ ) ) ] def __lowerCamelCase ( A__ ) -> dict: """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , A__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowerCamelCase ( A__ , A__ ) -> dict: """simple docstring""" UpperCamelCase = {len(A__ ) for value in gen_kwargs.values() if isinstance(A__ , A__ )} UpperCamelCase = {} for size in list_sizes: UpperCamelCase = list(range(A__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes UpperCamelCase = dict(A__ ) for key, value in shuffled_kwargs.items(): if isinstance(A__ , A__ ): UpperCamelCase = [value[i] for i in indices_per_size[len(A__ )]] return shuffled_kwargs
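# Sketch: the distribution rule above assigns each of at most max_num_jobs groups a
# contiguous range of shard indices, with earlier groups absorbing the remainder.
# Re-stated with readable names for clarity; this is not the library function itself.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    groups = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + num_shards_to_add))
    return groups


assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(2, 5) == [range(0, 1), range(1, 2)]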
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict: """simple docstring""" UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T return jnp.matmul(A__ , norm_emb_a.T ) class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = jnp.floataa def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config ) UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype ) UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) UpperCamelCase = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) ) UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) ) def __call__( self : str , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.vision_model(UpperCamelCase__ )[1] UpperCamelCase = self.visual_projection(UpperCamelCase__ ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCamelCase = 0.0 UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ ) # Use a lower threshold if an image has any special care concept UpperCamelCase = is_special_care * 0.0_1 UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = CLIPConfig _SCREAMING_SNAKE_CASE = """clip_input""" _SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ): """simple docstring""" if input_shape is None: UpperCamelCase = (1, 2_2_4, 2_2_4, 3) UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ ) super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init ) def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ): """simple docstring""" UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ ) UpperCamelCase = {'params': 
params_rng, 'dropout': dropout_rng} UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params'] return random_params def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ): """simple docstring""" UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
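# Sketch: jax_cosine_distance above row-normalizes both embedding matrices so the
# final matmul yields pairwise cosine similarities. An equivalent formulation using
# keepdims instead of the double transpose:
import jax.numpy as jnp


def cosine_similarity(emb_1, emb_2, eps=1e-12):
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return norm_1 @ norm_2.T


a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
assert jnp.allclose(cosine_similarity(a, a), jnp.eye(2))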
'''simple docstring'''

from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True iff item occurs in the sorted list, via recursive bisection."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
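# Sketch: each recursive call above slices the list, copying elements at every level.
# An iterative, index-based variant avoids the copies while keeping the same semantics.
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False


assert binary_search_iterative([1, 3, 5, 7], 5) is True
assert binary_search_iterative([1, 3, 5, 7], 4) is False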
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


_lowerCamelCase : str = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""

    def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
        """simple docstring"""
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , UpperCamelCase__ , )
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Dict = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : str = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _lowerCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCamelCase : Optional[int] = logging.getLogger(__name__) def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int: """simple docstring""" def get_dataset(A__ ): UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int: """simple docstring""" UpperCamelCase = [] for epoch in range(A__ ): # Train quickly model.train() for batch in dataloader: UpperCamelCase , UpperCamelCase = batch UpperCamelCase = model(A__ ) UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ ) accelerator.backward(A__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): """simple docstring""" super().__init__() UpperCamelCase = nn.Parameter(torch.randn(1 ) ) UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A ( self : str , UpperCamelCase__ : Dict ): """simple docstring""" return x * self.a + self.b class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A ( self : Optional[int] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() # Train baseline UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' ) accelerator.save_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , 
(UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save everything UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' ) accelerator.save_state(UpperCamelCase__ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase__ ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ ) UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch.tensor([1, 2, 3] ) UpperCamelCase = torch.tensor([2, 3, 4] ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(net.parameters() ) UpperCamelCase = Accelerator() with self.assertRaises(UpperCamelCase__ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def A ( self : Dict ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() UpperCamelCase = scheduler.state_dict() train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(UpperCamelCase__ , scheduler.state_dict() ) def A ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase = accelerator.prepare(UpperCamelCase__ ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def A ( self : Dict ): """simple docstring""" UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": 
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing" _lowerCamelCase : Union[str, Any] = DummyModel() _lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3) _lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders() _lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCamelCase : Any = group["params"][0].device break assert param_device.type == accelerator.device.type _lowerCamelCase : Tuple = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: _lowerCamelCase : Optional[Any] = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: _lowerCamelCase : Dict = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
'''simple docstring'''

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def A ( UpperCamelCase__ : ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def A ( self : Any ):
        """simple docstring"""
        raise NotImplementedError()
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _lowerCamelCase : List[str] = 5_0000 _lowerCamelCase : Optional[int] = 5000 _lowerCamelCase ,_lowerCamelCase : int = os.path.split(__file__) _lowerCamelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> int: """simple docstring""" for i in range(0 , len(A__ ) , A__ ): UpperCamelCase = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> int: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(0 , A__ , A__ ): UpperCamelCase = dataset[i : i + batch_size] def __lowerCamelCase ( ) -> List[str]: """simple docstring""" UpperCamelCase = {'num examples': SPEED_TEST_N_EXAMPLES} UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) UpperCamelCase = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) UpperCamelCase = generate_example_dataset( os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(A__ ) ) UpperCamelCase = func(A__ , **A__ ) print('shuffling dataset' ) UpperCamelCase = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(A__ ) ) UpperCamelCase = func( A__ , **A__ ) with open(A__ , 'wb' ) as f: f.write(json.dumps(A__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : int = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _lowerCamelCase : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> List[Any]: """simple docstring""" for attribute in key.split('.' ): UpperCamelCase = getattr(A__ , A__ ) if weight_type is not None: UpperCamelCase = getattr(A__ , A__ ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value else: UpperCamelCase = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(A__ )[0].split('.' 
)[-2] UpperCamelCase = mapped_key.replace('*' , A__ ) if "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' else: UpperCamelCase = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' ) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A__ ) @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__=None , A__=None , A__=True ) -> int: """simple docstring""" if config_path is not None: UpperCamelCase = UniSpeechSatConfig.from_pretrained(A__ ) else: UpperCamelCase = UniSpeechSatConfig() UpperCamelCase = '' if is_finetuned: UpperCamelCase = UniSpeechSatForCTC(A__ ) else: UpperCamelCase = UniSpeechSatForPreTraining(A__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) UpperCamelCase = model[0].eval() recursively_load_weights(A__ , A__ ) hf_wavavec.save_pretrained(A__ ) if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") 
parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _lowerCamelCase : Any = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ): """simple docstring""" if rouge_types is None: UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: UpperCamelCase = scoring.BootstrapAggregator() else: UpperCamelCase = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: UpperCamelCase = aggregator.aggregate() else: UpperCamelCase = {} for key in scores[0]: UpperCamelCase = [score[key] for score in scores] return result
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCamelCase : Dict = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''

from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
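# Sketch: the transfer function simplifies algebraically, since
# 128 + level + (c - 128) == c + level. A variant that clamps explicitly instead
# of relying on PIL's handling of out-of-range channel values:
from PIL import Image


def change_brightness_clamped(img: Image, level: float) -> Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(lambda c: max(0, min(255, int(c + level))))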
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 1 @register_to_config def __init__( self : Dict , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ): """simple docstring""" self.set_timesteps(UpperCamelCase__ ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. UpperCamelCase = 4 # running values UpperCamelCase = [] def A ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] UpperCamelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: UpperCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: UpperCamelCase = torch.sin(steps * math.pi / 2 ) ** 2 UpperCamelCase = (1.0 - self.betas**2) ** 0.5 UpperCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] UpperCamelCase = timesteps.to(UpperCamelCase__ ) UpperCamelCase = [] def A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) UpperCamelCase = (self.timesteps == timestep).nonzero().item() UpperCamelCase = timestep_index + 1 UpperCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(UpperCamelCase__ ) if len(self.ets ) == 1: UpperCamelCase = self.ets[-1] elif len(self.ets ) == 2: UpperCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: UpperCamelCase = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: UpperCamelCase = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) UpperCamelCase = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ): """simple docstring""" return sample def A ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any ): """simple docstring""" UpperCamelCase = self.alphas[timestep_index] UpperCamelCase = self.betas[timestep_index] UpperCamelCase = self.alphas[prev_timestep_index] UpperCamelCase = self.betas[prev_timestep_index] UpperCamelCase = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 ) UpperCamelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : List[str] ): """simple docstring""" return self.config.num_train_timesteps
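# Sketch: in diffusers this scheduler corresponds to IPNDMScheduler, and the
# obfuscated `A` methods above are set_timesteps and step (assumed mapping).
# Schematic denoising loop; the zero tensor stands in for a real model call.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a denoising network
    sample = scheduler.step(model_output, t, sample).prev_sample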
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = 'huggingface/label-files' UpperCamelCase = 'imagenet-1k-id2label.json' UpperCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) UpperCamelCase = {int(A__ ): v for k, v in idalabel.items()} UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCamelCase = BitConfig( conv_layer=A__ , num_labels=1_000 , idalabel=A__ , labelaid=A__ , ) return config def __lowerCamelCase ( A__ ) -> int: """simple docstring""" if "stem.conv" in name: UpperCamelCase = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: UpperCamelCase = name.replace('blocks' , 'layers' ) if "head.fc" in name: UpperCamelCase = name.replace('head.fc' , 'classifier.1' ) if name.startswith('norm' ): UpperCamelCase = 'bit.' + name if "bit" not in name and "classifier" not in name: UpperCamelCase = 'bit.encoder.' + name return name def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__=False ) -> Tuple: """simple docstring""" UpperCamelCase = get_config(A__ ) # load original model from timm UpperCamelCase = create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model UpperCamelCase = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val.squeeze() if 'head' in key else val # load HuggingFace model UpperCamelCase = BitForImageClassification(A__ ) model.eval() model.load_state_dict(A__ ) # create image processor UpperCamelCase = create_transform(**resolve_data_config({} , model=A__ ) ) UpperCamelCase = transform.transforms UpperCamelCase = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } UpperCamelCase = BitImageProcessor( do_resize=A__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=A__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCamelCase = prepare_img() UpperCamelCase = transform(A__ ).unsqueeze(0 ) UpperCamelCase = processor(A__ , return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(A__ , A__ ) # verify logits with torch.no_grad(): UpperCamelCase = model(A__ ) UpperCamelCase = outputs.logits print('Logits:' , logits[0, :3] ) print('Predicted 
class:' , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCamelCase = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(F"""ybelkada/{model_name}""" ) processor.push_to_hub(F"""ybelkada/{model_name}""" ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) _lowerCamelCase : Tuple = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : int ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[int] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def A 
( self : Any ): """simple docstring""" UpperCamelCase = self.get_config() UpperCamelCase = 3_0_0 return config def A ( self : Tuple ): """simple docstring""" ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = MraModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ): """simple docstring""" UpperCamelCase = True UpperCamelCase = MraModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = () def A ( self : str ): """simple docstring""" UpperCamelCase = MraModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : str ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : str ): 
"""simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def A ( self : List[Any] ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : List[str] ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
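# A minimal sketch of exercising the checkpoint used by the slow
# integration tests above outside the unittest harness. It assumes a
# transformers build that ships the MRA model and network access to the
# 'uw-madison/mra-base-512-4' weights.
import torch

from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
model.eval()

input_ids = torch.arange(256).unsqueeze(0)  # shape (1, 256), as in the test
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]

print(last_hidden_state.shape)  # the test expects torch.Size([1, 256, 768])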
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Dict = logging.get_logger(__name__) def __lowerCamelCase ( A__ ) -> Union[str, Any]: """simple docstring""" print('Loading config file...' ) def flatten_yaml_as_dict(A__ , A__="" , A__="." ): UpperCamelCase = [] for k, v in d.items(): UpperCamelCase = parent_key + sep + k if parent_key else k if isinstance(A__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(A__ , A__ , sep=A__ ).items() ) else: items.append((new_key, v) ) return dict(A__ ) UpperCamelCase = argparse.Namespace() with open(A__ , 'r' ) as yaml_file: try: UpperCamelCase = yaml.load(A__ , Loader=yaml.FullLoader ) UpperCamelCase = flatten_yaml_as_dict(A__ ) for k, v in flat_cfg.items(): setattr(A__ , A__ , A__ ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(A__ , str(A__ ) ) ) return config def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" UpperCamelCase = MobileViTVaConfig() UpperCamelCase = False # dataset if task_name.startswith('imagenet1k_' ): UpperCamelCase = 1_000 if int(task_name.strip().split('_' )[-1] ) == 384: UpperCamelCase = 384 else: UpperCamelCase = 256 UpperCamelCase = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): UpperCamelCase = 21_000 if int(task_name.strip().split('_' )[-1] ) == 384: UpperCamelCase = 384 else: UpperCamelCase = 256 UpperCamelCase = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): UpperCamelCase = 151 UpperCamelCase = 512 UpperCamelCase = 'ade20k-id2label.json' UpperCamelCase = True elif task_name.startswith('voc_' ): UpperCamelCase = 21 UpperCamelCase = 512 UpperCamelCase = 'pascal-voc-id2label.json' UpperCamelCase = True # orig_config UpperCamelCase = load_orig_config_file(A__ ) assert getattr(A__ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" UpperCamelCase = getattr(A__ , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(A__ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCamelCase = getattr(A__ , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCamelCase = getattr(A__ , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: UpperCamelCase = getattr(A__ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) UpperCamelCase = getattr(A__ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 ) UpperCamelCase = getattr(A__ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label UpperCamelCase = 'huggingface/label-files' UpperCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) UpperCamelCase = {int(A__ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase = dct.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ , A__=False ) -> Tuple: 
"""simple docstring""" if base_model: UpperCamelCase = '' else: UpperCamelCase = 'mobilevitv2.' UpperCamelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCamelCase = k[8:] else: UpperCamelCase = k if ".block." in k: UpperCamelCase = k_new.replace('.block.' , '.' ) if ".conv." in k: UpperCamelCase = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: UpperCamelCase = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: UpperCamelCase = k_new.replace('conv_1.' , F"""{model_prefix}conv_stem.""" ) for i in [1, 2]: if F"""layer_{i}.""" in k: UpperCamelCase = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" ) if ".exp_1x1." in k: UpperCamelCase = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: UpperCamelCase = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if F"""layer_{i}.0.""" in k: UpperCamelCase = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" ) if F"""layer_{i}.1.local_rep.0.""" in k: UpperCamelCase = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" ) if F"""layer_{i}.1.local_rep.1.""" in k: UpperCamelCase = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" ) for i in [3, 4, 5]: if i == 3: UpperCamelCase = [0, 1] elif i == 4: UpperCamelCase = [0, 1, 2, 3] elif i == 5: UpperCamelCase = [0, 1, 2] for j in j_in: if F"""layer_{i}.1.global_rep.{j}.""" in k: UpperCamelCase = k_new.replace( F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" ) if F"""layer_{i}.1.global_rep.{j+1}.""" in k: UpperCamelCase = k_new.replace( F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" ) if F"""layer_{i}.1.conv_proj.""" in k: UpperCamelCase = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" ) if "pre_norm_attn.0." in k: UpperCamelCase = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: UpperCamelCase = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: UpperCamelCase = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: UpperCamelCase = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: UpperCamelCase = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: UpperCamelCase = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: UpperCamelCase = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: UpperCamelCase = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: UpperCamelCase = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' 
): keys_to_ignore.append(A__ ) for k in keys_to_ignore: state_dict.pop(A__ , A__ ) def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Tuple: """simple docstring""" UpperCamelCase = get_mobilevitva_config(A__ , A__ ) # load original state_dict UpperCamelCase = torch.load(A__ , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): UpperCamelCase = MobileViTVaForSemanticSegmentation(A__ ).eval() UpperCamelCase = False else: UpperCamelCase = MobileViTVaForImageClassification(A__ ).eval() UpperCamelCase = False # remove and rename some keys of load the original model UpperCamelCase = checkpoint remove_unused_keys(A__ ) UpperCamelCase = create_rename_keys(A__ , base_model=A__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(A__ , A__ , A__ ) # load modified state_dict model.load_state_dict(A__ ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt' ) UpperCamelCase = model(**A__ ) # verify classification model if task_name.startswith('imagenet' ): UpperCamelCase = outputs.logits UpperCamelCase = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCamelCase = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , A__ , atol=1e-4 ) Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." 
) _lowerCamelCase : Tuple = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
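# A minimal sketch of invoking the conversion entry point above directly
# instead of through argparse. The two input paths are placeholders that
# must point at a real MobileViTV2 checkpoint and its YAML config; the
# positional order follows the call at the bottom of the script.
convert_mobilevitva_checkpoint(
    "imagenet1k_256",           # task name, see the --task choices above
    "mobilevitv2-1.0.pt",       # placeholder: original state dict (.pt)
    "mobilevitv2-1.0.yaml",     # placeholder: original config file
    "converted-mobilevitv2",    # output directory for the converted model
)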
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowerCamelCase : Union[str, Any] = "\\n\n" _lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" _lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Tuple ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int = 1_6 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[Any]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCamelCase = 'cuda' else: UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCamelCase = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) UpperCamelCase = model.to(UpperCamelCase__ ) UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCamelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCamelCase = model.config.max_length - 1 else: UpperCamelCase = model.config.max_length UpperCamelCase = tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='pt' , return_attention_mask=UpperCamelCase__ , ).to(UpperCamelCase__ ) UpperCamelCase = encodings['input_ids'] UpperCamelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCamelCase = [] UpperCamelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ) ): UpperCamelCase = min(start_index + batch_size , len(UpperCamelCase__ ) ) UpperCamelCase = encoded_texts[start_index:end_index] UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase__ ) UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCamelCase__ ), attn_mask] , dim=1 ) UpperCamelCase = encoded_batch with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).logits UpperCamelCase = out_logits[..., :-1, :].contiguous() UpperCamelCase = labels[..., 1:].contiguous() UpperCamelCase = attn_mask[..., 1:].contiguous() UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase__ )}
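# A toy illustration of the core computation above: per-token negative
# log-likelihoods from CrossEntropyLoss(reduction='none') are shifted,
# masked, averaged per sequence, and exponentiated into a perplexity.
# All tensors here are made up for the example.
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 4, 10)           # (batch, seq_len, vocab)
labels = torch.randint(0, 10, (1, 4))    # target token ids
attn_mask = torch.ones(1, 4)

loss_fct = CrossEntropyLoss(reduction="none")
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_mask = attn_mask[..., 1:].contiguous()

nll = loss_fct(shift_logits.transpose(1, 2), shift_labels)   # (1, 3)
perplexity = torch.exp((nll * shift_mask).sum(1) / shift_mask.sum(1))
print(perplexity)  # one perplexity value per sequence in the batch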
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool _lowerCamelCase : Optional[int] = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", 
"Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """facebook/nllb-200-distilled-600M""" _SCREAMING_SNAKE_CASE = ( """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. 
It returns the text translated in `tgt_lang`.""" ) _SCREAMING_SNAKE_CASE = """translator""" _SCREAMING_SNAKE_CASE = AutoTokenizer _SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM _SCREAMING_SNAKE_CASE = LANGUAGE_CODES _SCREAMING_SNAKE_CASE = ["""text""", """text""", """text"""] _SCREAMING_SNAKE_CASE = ["""text"""] def A ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ): """simple docstring""" if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) UpperCamelCase = self.lang_to_code[src_lang] UpperCamelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( UpperCamelCase__ , return_tensors='pt' , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : Any ): """simple docstring""" return self.model.generate(**UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : int ): """simple docstring""" return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__ )
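# A minimal sketch of what the tool wraps: translating with the NLLB
# checkpoint directly, using the FLORES-200 codes from the table above.
# Assumes network access; the example sentence is made up.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("ron_Latn"),  # Romanian
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))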
"""
Project Euler style block-counting problem: count the ways a row of
`length` units can be filled with red blocks at least three units long,
separated by at least one black square (the all-black row counts as one
way).
"""


def solution(length: int = 50) -> int:
    """Return the number of valid fillings of a row of `length` units."""
    # ways_number[n] is the count for a row of n units; rows shorter than
    # three units can only stay all black, hence the initial 1s.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # place the first red block of `block_length` at every offset
            # that leaves room for a black separator, and count the
            # fillings of whatever space follows the separator
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            # the block can also sit flush against the end of the row
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
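# A brute-force cross-check of the dynamic programming above for small
# rows. It enumerates every black/red colouring and keeps those whose
# maximal red runs are all at least 3 units long; Project Euler 114
# states a 7-unit row has exactly 17 valid fillings.
from itertools import product


def brute_force(length: int) -> int:
    count = 0
    for cells in product("BR", repeat=length):
        red_runs = "".join(cells).split("B")
        if all(len(run) == 0 or len(run) >= 3 for run in red_runs):
            count += 1
    return count


assert brute_force(7) == solution(7) == 17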
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Optional[Any] = { "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """gptj""" _SCREAMING_SNAKE_CASE = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[int] , UpperCamelCase__ : Dict=5_0_4_0_0 , UpperCamelCase__ : int=2_0_4_8 , UpperCamelCase__ : Tuple=4_0_9_6 , UpperCamelCase__ : Tuple=2_8 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : Optional[Any]=6_4 , UpperCamelCase__ : Any=None , UpperCamelCase__ : int="gelu_new" , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Any=1E-5 , UpperCamelCase__ : Tuple=0.0_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=5_0_2_5_6 , UpperCamelCase__ : Dict=5_0_2_5_6 , UpperCamelCase__ : Dict=False , **UpperCamelCase__ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = n_positions UpperCamelCase = n_embd UpperCamelCase = n_layer UpperCamelCase = n_head UpperCamelCase = n_inner UpperCamelCase = rotary_dim UpperCamelCase = activation_function UpperCamelCase = resid_pdrop UpperCamelCase = embd_pdrop UpperCamelCase = attn_pdrop UpperCamelCase = layer_norm_epsilon UpperCamelCase = initializer_range UpperCamelCase = use_cache UpperCamelCase = bos_token_id UpperCamelCase = eos_token_id super().__init__( bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase__ : PretrainedConfig , UpperCamelCase__ : str = "default" , UpperCamelCase__ : List[PatchingSpec] = None , UpperCamelCase__ : bool = False , ): """simple docstring""" super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ ) if not getattr(self._config , 'pad_token_id' , UpperCamelCase__ ): # TODO: how to do that better? 
UpperCamelCase = 0 @property def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' ) UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def A ( self : int ): """simple docstring""" return self._config.n_layer @property def A ( self : str ): """simple docstring""" return self._config.n_head def A ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): """simple docstring""" UpperCamelCase = super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch UpperCamelCase , UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values UpperCamelCase = seqlen + 2 UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCamelCase = [ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] UpperCamelCase = common_inputs['attention_mask'] if self.use_past: UpperCamelCase = ordered_inputs['attention_mask'].dtype UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def A ( self : Union[str, Any] ): """simple docstring""" return 1_3
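# A small sketch instantiating the configuration above with non-default
# sizes, as one might for a scaled-down GPT-J variant; the numbers here
# are illustrative only.
from transformers import GPTJConfig

config = GPTJConfig(
    vocab_size=50400,
    n_positions=512,   # shorter context than the 2048 default
    n_embd=256,
    n_layer=4,
    n_head=8,
    rotary_dim=32,     # rotary embeddings applied to the first 32 dims
)
# the attribute_map above aliases the generic names to the GPT-2-style ones
print(config.hidden_size, config.num_hidden_layers)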
"""
Binary insertion sort: a variant of insertion sort that locates each
element's insertion point with binary search, reducing comparisons to
O(n log n) while the element shifting stays O(n^2) in the worst case.
"""


def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place in ascending order and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary-search the sorted prefix collection[:i] for the
        # insertion point of val
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1

        # shift the tail of the sorted prefix one slot right, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
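# A quick property check of the sort above against Python's built-in
# sorted() on random input; purely illustrative.
import random

data = [random.randint(-100, 100) for _ in range(50)]
assert binary_insertion_sort(list(data)) == sorted(data)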
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : List[str] = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """big_bird""" def __init__( self : Optional[int] , UpperCamelCase__ : Tuple=5_0_3_5_8 , UpperCamelCase__ : Tuple=7_6_8 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=1_2 , UpperCamelCase__ : List[str]=3_0_7_2 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[Any]=4_0_9_6 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : List[str]=1E-1_2 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : int=0 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Any=6_6 , UpperCamelCase__ : Dict="block_sparse" , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[str]=6_4 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : int=None , **UpperCamelCase__ : Optional[Any] , ): """simple docstring""" super().__init__( pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , sep_token_id=UpperCamelCase__ , **UpperCamelCase__ , ) UpperCamelCase = vocab_size UpperCamelCase = max_position_embeddings UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = initializer_range UpperCamelCase = type_vocab_size UpperCamelCase = layer_norm_eps UpperCamelCase = use_cache UpperCamelCase = rescale_embeddings UpperCamelCase = attention_type UpperCamelCase = use_bias UpperCamelCase = block_size UpperCamelCase = num_random_blocks UpperCamelCase = classifier_dropout class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" @property def A ( self : Tuple ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
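# A minimal sketch of configuring the two attention modes accepted by the
# config above: 'block_sparse' targets long inputs, while 'original_full'
# falls back to standard full attention. The sizes are illustrative.
from transformers import BigBirdConfig

sparse_config = BigBirdConfig(
    attention_type="block_sparse",
    block_size=64,
    num_random_blocks=3,
    max_position_embeddings=4096,
)
full_config = BigBirdConfig(attention_type="original_full")
print(sparse_config.attention_type, full_config.attention_type)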
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase = [] for i in range(A__ ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ ) UpperCamelCase = 1.0 - self.betas UpperCamelCase = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # setable values UpperCamelCase = None UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() ) UpperCamelCase = variance_type def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ): """simple docstring""" if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase = 
self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) ) UpperCamelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase = variance.log() UpperCamelCase = beta.log() UpperCamelCase = (predicted_variance + 1) / 2 UpperCamelCase = frac * max_log + (1 - frac) * min_log return variance def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] UpperCamelCase = self.alphas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase = torch.clamp( UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device ) UpperCamelCase = self._get_variance( UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , ) if self.variance_type == "fixed_small_log": UpperCamelCase = variance elif self.variance_type == "learned_range": UpperCamelCase = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) UpperCamelCase = variance * variance_noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ): """simple docstring""" UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase = timesteps.to(original_samples.device ) UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
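# A minimal sketch of driving the scheduler above, assuming it is exposed
# under its diffusers name UnCLIPScheduler (as the output type suggests)
# with set_timesteps()/step() methods: set a timestep schedule, then call
# step() once per timestep. The randn "denoiser" is a stand-in for a real
# UnCLIP model, and a real pipeline would also pass prev_timestep
# explicitly rather than relying on the t - 1 default.
import torch

scheduler = UnCLIPScheduler()
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)  # illustrative sample shape
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for the denoiser
    sample = scheduler.step(model_output, t, sample).prev_sample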
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) _lowerCamelCase : Union[str, Any] = logging.getLogger(__name__) def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" UpperCamelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=A__ , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=A__ , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=A__ , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=A__ , default='data/dump' , help='The dump file prefix.' ) UpperCamelCase = parser.parse_args() logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" ) if args.tokenizer_type == "bert": UpperCamelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` UpperCamelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCamelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` UpperCamelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": UpperCamelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` UpperCamelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F"""Loading text from {args.file_path}""" ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: UpperCamelCase = fp.readlines() logger.info('Start encoding' ) logger.info(F"""{len(A__ )} examples to process.""" ) UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = 10_000 UpperCamelCase = time.time() for text in data: UpperCamelCase = F"""{bos} {text.strip()} {sep}""" UpperCamelCase = tokenizer.encode(A__ , add_special_tokens=A__ ) rslt.append(A__ ) iter += 1 if iter % interval == 0: UpperCamelCase = time.time() logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" ) UpperCamelCase = time.time() logger.info('Finished binarization' ) logger.info(F"""{len(A__ )} examples processed.""" ) UpperCamelCase = F"""{args.dump_file}.{args.tokenizer_name}.pickle""" UpperCamelCase = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCamelCase = [np.uintaa(A__ ) for d in rslt] else: UpperCamelCase = [np.intaa(A__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"""Dump to {dp_file}""" ) with open(A__ , 'wb' ) as handle: pickle.dump(rslt_ , A__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
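# A small sketch of loading the pickle produced by the script above and
# inspecting it; the file name follows the
# f"{args.dump_file}.{args.tokenizer_name}.pickle" pattern with the
# script's default arguments.
import pickle

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    sequences = pickle.load(handle)  # a list of numpy arrays of token ids

print(len(sequences), "encoded examples; first ids:", sequences[0][:10])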
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = num_stages UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = scope def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : List[str] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Optional[int] ): """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def A ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def A ( self : Optional[int] ): """simple docstring""" pass def A ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = 
[*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def A ( self : Dict ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Any: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : Optional[Any] ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ConvNextConfig _SCREAMING_SNAKE_CASE = False def A 
( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self )
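# A hedged end-to-end sketch of the model exercised by the integration test
# above. The checkpoint name and fixture image path are taken from the test
# itself; everything else is standard transformers usage, not code from this file.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])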
28
1
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def A ( self : int , UpperCamelCase__ : Tuple ): """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = [label.strip() for label in labels.split(',' ) if label.strip()] return labels def __call__( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ): """simple docstring""" if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) == 0: raise ValueError('You must include at least one label and at least one sequence.' ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' 'Make sure the passed template includes formatting syntax such as {{}} where the label should go.' ).format(UpperCamelCase__ ) ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = [sequences] UpperCamelCase = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(UpperCamelCase__ )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(_a ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase__ : Any=ZeroShotClassificationArgumentHandler() , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = args_parser super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) if self.entailment_id == -1: logger.warning( 'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to ' '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' ) @property def A ( self : List[Any] ): """simple docstring""" for label, ind in self.model.config.labelaid.items(): if label.lower().startswith('entail' ): return ind return -1 def A ( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=TruncationStrategy.ONLY_FIRST , **UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( 'Tokenizer was not supporting padding necessary for zero-shot, attempting to use ' ' `pad_token=eos_token`' ) UpperCamelCase = self.tokenizer.eos_token try: UpperCamelCase = self.tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , ) except Exception as e: if "too short" in str(UpperCamelCase__ ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. 
UpperCamelCase = self.tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def A ( self : Optional[Any] , **UpperCamelCase__ : List[str] ): """simple docstring""" if kwargs.get('multi_class' , UpperCamelCase__ ) is not None: UpperCamelCase = kwargs['multi_class'] logger.warning( 'The `multi_class` argument has been deprecated and renamed to `multi_label`. ' '`multi_class` will be removed in a future version of Transformers.' ) UpperCamelCase = {} if "candidate_labels" in kwargs: UpperCamelCase = self._args_parser._parse_labels(kwargs['candidate_labels'] ) if "hypothesis_template" in kwargs: UpperCamelCase = kwargs['hypothesis_template'] UpperCamelCase = {} if "multi_label" in kwargs: UpperCamelCase = kwargs['multi_label'] return preprocess_params, {}, postprocess_params def __call__( self : int , UpperCamelCase__ : Union[str, List[str]] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any , ): """simple docstring""" if len(UpperCamelCase__ ) == 0: pass elif len(UpperCamelCase__ ) == 1 and "candidate_labels" not in kwargs: UpperCamelCase = args[0] else: raise ValueError(f"""Unable to understand extra arguments {args}""" ) return super().__call__(UpperCamelCase__ , **UpperCamelCase__ ) def A ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]="This example is {}." ): """simple docstring""" UpperCamelCase , UpperCamelCase = self._args_parser(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for i, (candidate_label, sequence_pair) in enumerate(zip(UpperCamelCase__ , UpperCamelCase__ ) ): UpperCamelCase = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(UpperCamelCase__ ) - 1, **model_input, } def A ( self : Optional[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" UpperCamelCase = inputs['candidate_label'] UpperCamelCase = inputs['sequence'] UpperCamelCase = {k: inputs[k] for k in self.tokenizer.model_input_names} UpperCamelCase = self.model(**UpperCamelCase__ ) UpperCamelCase = { 'candidate_label': candidate_label, 'sequence': sequence, 'is_last': inputs['is_last'], **outputs, } return model_outputs def A ( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]=False ): """simple docstring""" UpperCamelCase = [outputs['candidate_label'] for outputs in model_outputs] UpperCamelCase = [outputs['sequence'] for outputs in model_outputs] UpperCamelCase = np.concatenate([output['logits'].numpy() for output in model_outputs] ) UpperCamelCase = logits.shape[0] UpperCamelCase = len(UpperCamelCase__ ) UpperCamelCase = N // n UpperCamelCase = logits.reshape((num_sequences, n, -1) ) if multi_label or len(UpperCamelCase__ ) == 1: # softmax over the entailment vs. 
contradiction dim for each label independently UpperCamelCase = self.entailment_id UpperCamelCase = -1 if entailment_id == 0 else 0 UpperCamelCase = reshaped_outputs[..., [contradiction_id, entailment_id]] UpperCamelCase = np.exp(UpperCamelCase__ ) / np.exp(UpperCamelCase__ ).sum(-1 , keepdims=UpperCamelCase__ ) UpperCamelCase = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels UpperCamelCase = reshaped_outputs[..., self.entailment_id] UpperCamelCase = np.exp(UpperCamelCase__ ) / np.exp(UpperCamelCase__ ).sum(-1 , keepdims=UpperCamelCase__ ) UpperCamelCase = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
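# A hedged usage sketch for the zero-shot classification pipeline implemented
# above, via the public `pipeline` factory. The model name is illustrative
# (any NLI model with an "entailment" label works); the `hypothesis_template`
# default matches the one declared in `_parse_and_tokenize`'s caller above.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput",
    candidate_labels=["hardware", "cooking", "politics"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first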
28
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of 
cross-attention to the state dict UpperCamelCase = in_proj_weight_cross_attn[:256, :] UpperCamelCase = in_proj_bias_cross_attn[:256] UpperCamelCase = in_proj_weight_cross_attn[256:512, :] UpperCamelCase = in_proj_bias_cross_attn[256:512] UpperCamelCase = in_proj_weight_cross_attn[-256:, :] UpperCamelCase = in_proj_bias_cross_attn[-256:] def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase = max(A__ , A__ ) UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000 UpperCamelCase = target_max_size / current_max_size UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = F.to_tensor(A__ ) UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" logger.info('Converting model...' ) # load original state dict UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) UpperCamelCase = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val # create HuggingFace model and load state dict UpperCamelCase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase = 15 UpperCamelCase = 2 UpperCamelCase = {0: 'table', 1: 'table rotated'} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} else: UpperCamelCase = 125 UpperCamelCase = 6 UpperCamelCase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 ) UpperCamelCase = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ ) UpperCamelCase = Image.open(A__ ).convert('RGB' ) UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) UpperCamelCase = model(A__ ) if "detection" in checkpoint_url: UpperCamelCase = (1, 15, 3) UpperCamelCase = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCamelCase = (1, 125, 7) UpperCamelCase = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) 
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) UpperCamelCase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : int = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
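# A hedged example invocation of the conversion script above. The flag names
# and the checkpoint URL come from the argparse setup; the script filename and
# output directory are illustrative assumptions:
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection \
#       --push_to_hub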
28
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : int = logging.get_logger(__name__) _lowerCamelCase : List[Any] = { "andreasmadsen/efficient_mlm_m0.40": ( "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json" ), } class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """roberta-prelayernorm""" def __init__( self : Tuple , UpperCamelCase__ : Dict=5_0_2_6_5 , UpperCamelCase__ : Optional[int]=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Optional[Any]=1_2 , UpperCamelCase__ : Union[str, Any]=3_0_7_2 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=5_1_2 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : List[str]=0.0_2 , UpperCamelCase__ : List[str]=1E-1_2 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Optional[int]="absolute" , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[Any] , ): """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = position_embedding_type UpperCamelCase = use_cache UpperCamelCase = classifier_dropout class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" @property def A ( self : int ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
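# A short sketch instantiating the configuration above. The class name
# RobertaPreLayerNormConfig is an assumption (identifiers in this dump are
# obfuscated); the keyword names and defaults come from the __init__ signature.
config = RobertaPreLayerNormConfig(vocab_size=50265, hidden_size=768, num_attention_heads=12)
assert config.position_embedding_type == "absolute"  # default from the signature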
28
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline: samples frames with decord and classifies the clip."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        # Sample `num_frames` evenly spaced frame indices from the clip.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
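# A hedged usage sketch for the pipeline above via the `pipeline` factory.
# The model name and video URL are illustrative; the `decord` backend is
# required, as enforced in __init__.
from transformers import pipeline

video_cls = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
preds = video_cls("https://example.com/clip.mp4", top_k=3, frame_sampling_rate=4)
for p in preds:
    print(p["label"], round(p["score"], 3))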
28
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # NOTE: task name and schemas are copied verbatim; `include_in_asdict_even_if_is_default`
    # keeps the task name in serialized dataset infos.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
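# A small sketch of how the frozen task template above is consumed; `squad`
# is just an example dataset whose columns already match the template's
# question/context/answers mapping.
from datasets import load_dataset

ds = load_dataset("squad", split="validation[:16]")
qa_ready = ds.prepare_for_task("question-answering-extractive")
print(qa_ready.column_names)  # ['question', 'context', 'answers']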
28
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCamelCase : Optional[int] = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _lowerCamelCase : Union[str, Any] = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _lowerCamelCase : Optional[Any] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _lowerCamelCase : List[Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 
3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _lowerCamelCase : List[str] = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]: """simple docstring""" return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(A__ ) UpperCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __lowerCamelCase ( ) -> str: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __lowerCamelCase ( ) -> List[str]: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) ) UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' ) with open(A__ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ ) UpperCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
28
1
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline _SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""} _SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""} _SCREAMING_SNAKE_CASE = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _SCREAMING_SNAKE_CASE = frozenset([] ) def A ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , ) UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) UpperCamelCase = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) UpperCamelCase = CLIPTextModel(UpperCamelCase__ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def A ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=0 ): """simple docstring""" UpperCamelCase = floats_tensor((1, 1_6, 1_6) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) UpperCamelCase = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ 
).manual_seed(UpperCamelCase__ ) UpperCamelCase = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=0 ): """simple docstring""" UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('RGB' ) if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def A ( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=0 ): """simple docstring""" UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('RGB' ) if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def A ( self : List[Any] ): """simple docstring""" if not hasattr(self.pipeline_class , '_optional_components' ): return UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = pipe(**UpperCamelCase__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase__ ) UpperCamelCase = self.pipeline_class.from_pretrained(UpperCamelCase__ ) pipe_loaded.to(UpperCamelCase__ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = pipe_loaded(**UpperCamelCase__ )[0] UpperCamelCase = np.abs(output - output_loaded ).max() self.assertLess(UpperCamelCase__ , 1E-4 ) def A ( self : List[str] ): """simple docstring""" UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = 
self.get_dummy_mask_inputs(UpperCamelCase__ ) UpperCamelCase = pipe.generate_mask(**UpperCamelCase__ ) UpperCamelCase = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 1_6, 1_6) ) UpperCamelCase = np.array([0] * 9 ) UpperCamelCase = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inversion_inputs(UpperCamelCase__ ) UpperCamelCase = pipe.invert(**UpperCamelCase__ ).images UpperCamelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 3_2, 3_2, 3) ) UpperCamelCase = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1E-3 ) def A ( self : List[Any] ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = {'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'beta_schedule': 'scaled_linear'} UpperCamelCase = DPMSolverMultistepScheduler(**UpperCamelCase__ ) UpperCamelCase = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ ) UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inversion_inputs(UpperCamelCase__ ) UpperCamelCase = pipe.invert(**UpperCamelCase__ ).images UpperCamelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 3_2, 3_2, 3) ) UpperCamelCase = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1E-3 ) @require_torch_gpu @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Union[str, Any] ): """simple docstring""" UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) UpperCamelCase = raw_image.convert('RGB' ).resize((7_6_8, 7_6_8) ) UpperCamelCase = raw_image def A ( self : Tuple ): """simple docstring""" UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) UpperCamelCase = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCamelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = 'a bowl of fruit' UpperCamelCase = 'a bowl of pears' UpperCamelCase = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) UpperCamelCase = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents UpperCamelCase = pipe( 
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type='numpy' , ).images[0] UpperCamelCase = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) ) / 2_5_5 ) assert np.abs((expected_image - image).max() ) < 5E-1 def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCamelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = 'a bowl of fruit' UpperCamelCase = 'a bowl of pears' UpperCamelCase = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , ) UpperCamelCase = pipe.invert( prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=2_5 , ).latents UpperCamelCase = pipe( prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='numpy' , ).images[0] UpperCamelCase = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) ) / 2_5_5 ) assert np.abs((expected_image - image).max() ) < 5E-1
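# A condensed, hedged sketch of the three-stage DiffEdit flow that the slow
# tests above exercise (checkpoint, schedulers, and prompts taken from the
# tests; `raw_image` is assumed to be a 768x768 RGB PIL image as in setUpClass):
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

# 1) mask where source and target prompts disagree, 2) DDIM-invert the image,
# 3) denoise the inverted latents under the target prompt within the mask.
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]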
28
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ): """simple docstring""" UpperCamelCase = sigma_max # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) UpperCamelCase = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.floataa , device=UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCamelCase = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device ) UpperCamelCase = sigma + gamma * sigma UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_hat + sigma_hat * model_output UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_prev + sigma_prev * model_output UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A 
( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ): """simple docstring""" raise NotImplementedError()
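# A standalone sketch of the schedule built in set_timesteps above, using the
# defaults registered in __init__ (sigma_min=0.02, sigma_max=100): a geometric
# interpolation between sigma_max**2 and sigma_min**2, stored so that indexing
# by timestep t gives sigma_max**2 at t = n - 1 down to sigma_min**2 at t = 0.
import numpy as np

sigma_min, sigma_max, n = 0.02, 100.0, 5
timesteps = np.arange(0, n)[::-1]  # [4, 3, 2, 1, 0]
schedule = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1)) for i in timesteps]
assert np.isclose(schedule[0], sigma_min**2)   # element built from i = n - 1
assert np.isclose(schedule[-1], sigma_max**2)  # element built from i = 0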
28
1
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at ``number`` for up to ``iterations`` and return the result string."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
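# A minimal usage sketch for the function above (name as restored here); the
# expected output is easy to verify by hand, including the trailing space.
assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "
assert fizz_buzz(1, 15).split()[-1] == "FizzBuzz"  # 15 is divisible by both 3 and 5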
28
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
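# A brief sketch of what the lazy-import structure above buys (hypothetical
# session; actual availability depends on installed extras): submodules are
# imported only when one of their attributes is first accessed.
#
#     from transformers import IBertConfig  # cheap: only the config module loads
#     from transformers import IBertModel   # triggers the torch-dependent modeling module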
28
1
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue over a fixed-capacity doubly linked list; dequeued nodes are reused."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return self.front == self.rear and self.front is not None and self.front.data is None

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
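# A small usage sketch for the queue above (capacity 2 keeps the wrap-around
# visible); behavior follows the methods as restored here.
queue = CircularQueueLinkedList(initial_capacity=2)
queue.enqueue("a")
queue.enqueue("b")
print(queue.first())    # "a"
print(queue.dequeue())  # "a"; the freed node is reused by the next enqueue
queue.enqueue("c")
print(queue.dequeue())  # "b"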
28
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of all triangles produced by the recurrence below whose
    perimeter does not exceed ``max_perimeter``. The shape of the iteration
    (perimeters of the form 2 * value +/- 2, default bound 10**9) matches
    Project Euler problem 94: almost-equilateral triangles with integral
    side lengths and area.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Step to the next solution of the underlying Pell-style recurrence.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (n, n, n + 1) and (n, n, n - 1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
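# Quick hand-verifiable sanity check: the only such triangles with perimeter
# <= 100 are (5, 5, 6) and (17, 17, 16), with perimeters 16 and 50.
assert solution(100) == 16 + 50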
'''simple docstring''' import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( A__ , A__ , A__ , A__="attention" ) -> Tuple: """simple docstring""" UpperCamelCase = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] UpperCamelCase = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] UpperCamelCase = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] UpperCamelCase = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def __lowerCamelCase ( A__ , A__ , A__ , A__=False ) -> Optional[int]: """simple docstring""" if split_mlp_wi: UpperCamelCase = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] UpperCamelCase = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] UpperCamelCase = (wi_a, wi_a) else: UpperCamelCase = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""] UpperCamelCase = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Tuple: """simple docstring""" return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""] def __lowerCamelCase ( A__ , *, A__ , A__ ) -> Tuple: """simple docstring""" UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) UpperCamelCase = {'/'.join(A__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCamelCase = 'encoder/layers_0/mlp/wi_0/kernel' in old print('Split MLP:' , A__ ) UpperCamelCase = collections.OrderedDict() # Shared embeddings. UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(A__ ): # Block i, layer 0 (Self Attention). UpperCamelCase = tax_layer_norm_lookup(A__ , A__ , 'encoder' , 'pre_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A__ , A__ , 'encoder' , 'attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (MLP). UpperCamelCase = tax_layer_norm_lookup(A__ , A__ , 'encoder' , 'pre_mlp_layer_norm' ) UpperCamelCase , UpperCamelCase = tax_mlp_lookup(A__ , A__ , 'encoder' , A__ ) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T UpperCamelCase = old[ 'encoder/relpos_bias/rel_embedding' ].T UpperCamelCase = old['encoder/encoder_norm/scale'] if not is_encoder_only: # Decoder. for i in range(A__ ): # Block i, layer 0 (Self Attention). UpperCamelCase = tax_layer_norm_lookup(A__ , A__ , 'decoder' , 'pre_self_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A__ , A__ , 'decoder' , 'self_attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). UpperCamelCase = tax_layer_norm_lookup(A__ , A__ , 'decoder' , 'pre_cross_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A__ , A__ , 'decoder' , 'encoder_decoder_attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 2 (MLP). 
UpperCamelCase = tax_layer_norm_lookup(A__ , A__ , 'decoder' , 'pre_mlp_layer_norm' ) UpperCamelCase , UpperCamelCase = tax_mlp_lookup(A__ , A__ , 'decoder' , A__ ) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T UpperCamelCase = old['decoder/decoder_norm/scale'] UpperCamelCase = old[ 'decoder/relpos_bias/rel_embedding' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' ) UpperCamelCase = state_dict['shared.weight'] return state_dict def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = checkpoints.load_tax_checkpoint(A__ ) UpperCamelCase = convert_tax_to_pytorch(A__ , num_layers=config.num_layers , is_encoder_only=A__ ) UpperCamelCase = make_state_dict(A__ , A__ ) model.load_state_dict(A__ , strict=A__ ) def __lowerCamelCase ( A__ , A__ , A__ , A__ = False ) -> Tuple: """simple docstring""" UpperCamelCase = TaConfig.from_json_file(A__ ) print(F"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCamelCase = TaEncoderModel(A__ ) else: UpperCamelCase = TaForConditionalGeneration(A__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(A__ , A__ , A__ , A__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(A__ ) # Verify that we can load the checkpoint. model.from_pretrained(A__ ) print('Done' ) if __name__ == "__main__": _lowerCamelCase : int = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) _lowerCamelCase : Union[str, Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for weights (kept from the original; unused below)
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance found so far from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowerCamelCase ( A__ ) -> Any: """simple docstring""" UpperCamelCase = SwinConfig() UpperCamelCase = swin_name.split('_' ) UpperCamelCase = name_split[1] UpperCamelCase = int(name_split[4] ) UpperCamelCase = int(name_split[3][-1] ) if model_size == "tiny": UpperCamelCase = 96 UpperCamelCase = (2, 2, 6, 2) UpperCamelCase = (3, 6, 12, 24) elif model_size == "small": UpperCamelCase = 96 UpperCamelCase = (2, 2, 18, 2) UpperCamelCase = (3, 6, 12, 24) elif model_size == "base": UpperCamelCase = 128 UpperCamelCase = (2, 2, 18, 2) UpperCamelCase = (4, 8, 16, 32) else: UpperCamelCase = 192 UpperCamelCase = (2, 2, 18, 2) UpperCamelCase = (6, 12, 24, 48) if "in22k" in swin_name: UpperCamelCase = 21_841 else: UpperCamelCase = 1_000 UpperCamelCase = 'huggingface/label-files' UpperCamelCase = 'imagenet-1k-id2label.json' UpperCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) UpperCamelCase = {int(A__ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = img_size UpperCamelCase = num_classes UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = num_heads UpperCamelCase = window_size return config def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" if "patch_embed.proj" in name: UpperCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: UpperCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: UpperCamelCase = 'encoder.' + name if "attn.proj" in name: UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: UpperCamelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: UpperCamelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: UpperCamelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' ) if name == "norm.weight": UpperCamelCase = 'layernorm.weight' if name == "norm.bias": UpperCamelCase = 'layernorm.bias' if "head" in name: UpperCamelCase = name.replace('head' , 'classifier' ) else: UpperCamelCase = 'swin.' + name return name def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCamelCase = orig_state_dict.pop(A__ ) if "mask" in key: continue elif "qkv" in key: UpperCamelCase = key.split('.' 
) UpperCamelCase = int(key_split[1] ) UpperCamelCase = int(key_split[3] ) UpperCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCamelCase = val[:dim, :] UpperCamelCase = val[ dim : dim * 2, : ] UpperCamelCase = val[-dim:, :] else: UpperCamelCase = val[ :dim ] UpperCamelCase = val[ dim : dim * 2 ] UpperCamelCase = val[ -dim: ] else: UpperCamelCase = val return orig_state_dict def __lowerCamelCase ( A__ , A__ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() UpperCamelCase = get_swin_config(A__ ) UpperCamelCase = SwinForImageClassification(A__ ) model.eval() UpperCamelCase = convert_state_dict(timm_model.state_dict() , A__ ) model.load_state_dict(A__ ) UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) ) UpperCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) UpperCamelCase = image_processor(images=A__ , return_tensors='pt' ) UpperCamelCase = timm_model(inputs['pixel_values'] ) UpperCamelCase = model(**A__ ).logits assert torch.allclose(A__ , A__ , atol=1e-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict: """simple docstring""" UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T return jnp.matmul(A__ , norm_emb_a.T ) class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = jnp.floataa def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config ) UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype ) UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) UpperCamelCase = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) ) UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) ) def __call__( self : str , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.vision_model(UpperCamelCase__ )[1] UpperCamelCase = self.visual_projection(UpperCamelCase__ ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCamelCase = 0.0 UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ ) # Use a lower threshold if an image has any special care concept UpperCamelCase = is_special_care * 0.0_1 UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = CLIPConfig _SCREAMING_SNAKE_CASE = """clip_input""" _SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ): """simple docstring""" if input_shape is None: UpperCamelCase = (1, 2_2_4, 2_2_4, 3) UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ ) super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init ) def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ): """simple docstring""" UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ ) UpperCamelCase = {'params': 
params_rng, 'dropout': dropout_rng} UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params'] return random_params def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ): """simple docstring""" UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence of `sequence` by exploring a binary state-space
    tree: at each index, first skip the element, then include it."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCamelCase : Optional[int] = logging.getLogger(__name__) def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int: """simple docstring""" def get_dataset(A__ ): UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int: """simple docstring""" UpperCamelCase = [] for epoch in range(A__ ): # Train quickly model.train() for batch in dataloader: UpperCamelCase , UpperCamelCase = batch UpperCamelCase = model(A__ ) UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ ) accelerator.backward(A__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): """simple docstring""" super().__init__() UpperCamelCase = nn.Parameter(torch.randn(1 ) ) UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A ( self : str , UpperCamelCase__ : Dict ): """simple docstring""" return x * self.a + self.b class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A ( self : Optional[int] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() # Train baseline UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' ) accelerator.save_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , 
(UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save everything UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' ) accelerator.save_state(UpperCamelCase__ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase__ ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ ) UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch.tensor([1, 2, 3] ) UpperCamelCase = torch.tensor([2, 3, 4] ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(net.parameters() ) UpperCamelCase = Accelerator() with self.assertRaises(UpperCamelCase__ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def A ( self : Dict ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() UpperCamelCase = scheduler.state_dict() train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(UpperCamelCase__ , scheduler.state_dict() ) def A ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase = accelerator.prepare(UpperCamelCase__ ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def A ( self : Dict ): """simple docstring""" UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": 
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing" _lowerCamelCase : Union[str, Any] = DummyModel() _lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3) _lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders() _lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCamelCase : Any = group["params"][0].device break assert param_device.type == accelerator.device.type _lowerCamelCase : Tuple = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: _lowerCamelCase : Optional[Any] = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: _lowerCamelCase : Dict = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
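A hypothetical invocation of the converter above, passing arguments directly to `main` (the paths are placeholders, not from the original file):

main(
    [
        "--model_name", "bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",
        "--tf_cache_dir", "/path/to/tf_checkpoints",
    ]
)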
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _lowerCamelCase : List[str] = 5_0000 _lowerCamelCase : Optional[int] = 5000 _lowerCamelCase ,_lowerCamelCase : int = os.path.split(__file__) _lowerCamelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> int: """simple docstring""" for i in range(0 , len(A__ ) , A__ ): UpperCamelCase = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> int: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(0 , A__ , A__ ): UpperCamelCase = dataset[i : i + batch_size] def __lowerCamelCase ( ) -> List[str]: """simple docstring""" UpperCamelCase = {'num examples': SPEED_TEST_N_EXAMPLES} UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) UpperCamelCase = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) UpperCamelCase = generate_example_dataset( os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(A__ ) ) UpperCamelCase = func(A__ , **A__ ) print('shuffling dataset' ) UpperCamelCase = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(A__ ) ) UpperCamelCase = func( A__ , **A__ ) with open(A__ , 'wb' ) as f: f.write(json.dumps(A__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
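A quick usage check for the sort above (illustrative only; note the implementation assumes a non-empty list of non-negative integers):

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]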
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ): """simple docstring""" if rouge_types is None: UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: UpperCamelCase = scoring.BootstrapAggregator() else: UpperCamelCase = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: UpperCamelCase = aggregator.aggregate() else: UpperCamelCase = {} for key in scores[0]: UpperCamelCase = [score[key] for score in scores] return result
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def A ( self : Any ): """simple docstring""" UpperCamelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , 'embed_dim' ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , 'num_heads' ) ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Union[str, Any]=6_4 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Dict=[1_6, 4_8, 9_6] , UpperCamelCase__ : Any=[1, 3, 6] , UpperCamelCase__ : List[str]=[1, 2, 1_0] , UpperCamelCase__ : str=[7, 3, 3] , UpperCamelCase__ : int=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[2, 2, 2] , UpperCamelCase__ : List[str]=[False, False, True] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : int=0.0_2 , UpperCamelCase__ : Dict=1E-1_2 , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_sizes UpperCamelCase = patch_stride UpperCamelCase = patch_padding UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = num_labels UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = num_heads UpperCamelCase = stride_kv UpperCamelCase = depth UpperCamelCase = cls_token UpperCamelCase = attention_drop_rate UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps def A ( self : int ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : str ): """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def A ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = CvtModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase , UpperCamelCase = image_size[0], image_size[1] for i in range(len(self.depth ) 
): UpperCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def A ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = CvtForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = (CvtModel, CvtForImageClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : str ): """simple docstring""" UpperCamelCase = CvtModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : Union[str, Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Any ): """simple docstring""" return @unittest.skip(reason='Cvt does not output attentions' ) def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def A ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def A ( self : List[str] ): """simple docstring""" pass def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ): UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with 
torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = len(self.model_tester.depth ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A ( self : int ): """simple docstring""" pass @slow def A ( self : Tuple ): """simple docstring""" for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = CvtModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : List[str] ): """simple docstring""" return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A ( self : Any ): """simple docstring""" UpperCamelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # per-pixel transformation applied via Image.point
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
28
'''simple docstring'''
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho,
    beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot,
    blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap,
    clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl,
    cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr,
    dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra,
    encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet,
    fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa,
    gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip,
    jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer,
    longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa,
    mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva,
    mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer,
    oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct,
    plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet,
    roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder,
    speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers,
    ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta,
    unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid,
    vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip,
    xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
28
1
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase = [] for i in range(A__ ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ ) UpperCamelCase = 1.0 - self.betas UpperCamelCase = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # setable values UpperCamelCase = None UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() ) UpperCamelCase = variance_type def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ): """simple docstring""" if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase = 
self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) ) UpperCamelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase = variance.log() UpperCamelCase = beta.log() UpperCamelCase = (predicted_variance + 1) / 2 UpperCamelCase = frac * max_log + (1 - frac) * min_log return variance def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] UpperCamelCase = self.alphas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase = torch.clamp( UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device ) UpperCamelCase = self._get_variance( UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , ) if self.variance_type == "fixed_small_log": UpperCamelCase = variance elif self.variance_type == "learned_range": UpperCamelCase = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) UpperCamelCase = variance * variance_noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ): """simple docstring""" UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase = timesteps.to(original_samples.device ) UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
28
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : int ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[int] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def A 
( self : Any ): """simple docstring""" UpperCamelCase = self.get_config() UpperCamelCase = 3_0_0 return config def A ( self : Tuple ): """simple docstring""" ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = MraModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ): """simple docstring""" UpperCamelCase = True UpperCamelCase = MraModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = () def A ( self : str ): """simple docstring""" UpperCamelCase = MraModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : str ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : str ): 
"""simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def A ( self : List[Any] ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : List[str] ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
28
1
'''simple docstring'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
28
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowerCamelCase : Union[str, Any] = "\\n\n" _lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" _lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Tuple ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int = 1_6 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[Any]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCamelCase = 'cuda' else: UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCamelCase = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) UpperCamelCase = model.to(UpperCamelCase__ ) UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCamelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCamelCase = model.config.max_length - 1 else: UpperCamelCase = model.config.max_length UpperCamelCase = tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='pt' , return_attention_mask=UpperCamelCase__ , ).to(UpperCamelCase__ ) UpperCamelCase = encodings['input_ids'] UpperCamelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCamelCase = [] UpperCamelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ) ): UpperCamelCase = min(start_index + batch_size , len(UpperCamelCase__ ) ) UpperCamelCase = encoded_texts[start_index:end_index] UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase__ ) UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCamelCase__ ), attn_mask] , dim=1 ) UpperCamelCase = encoded_batch with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).logits UpperCamelCase = out_logits[..., :-1, :].contiguous() UpperCamelCase = labels[..., 1:].contiguous() UpperCamelCase = attn_mask[..., 1:].contiguous() UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase__ )}
28
1
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
0
'''simple docstring'''


def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
28
0
'''simple docstring'''
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class __A ( unittest.TestCase ):
    def _lowercase (self : Tuple ):
        UpperCAmelCase_ = logging.get_logger()

        # the current default level is logging.WARNING
        UpperCAmelCase_ = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        # restore to the original level
        logging.set_verbosity(__a )

    def _lowercase (self : Tuple ):
        UpperCAmelCase_ = logging.get_verbosity()
        UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        UpperCAmelCase_ = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(__a ) as cl:
                logger.warning(__a )
            self.assertEqual(cl.out , msg + "\n" )

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(__a ) as cl:
            logger.warning(__a )
        self.assertEqual(cl.out , "" )

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(__a ) as cl:
            logger.warning(__a )
        self.assertEqual(cl.out , msg + "\n" )

        # restore to the original level
        logging.set_verbosity(__a )

    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def _lowercase (self : Optional[Any] ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )

        UpperCAmelCase_ = os.getenv("TRANSFORMERS_VERBOSITY" , __a )
        UpperCAmelCase_ = logging.log_levels[env_level_str]

        UpperCAmelCase_ = logging.get_verbosity()
        self.assertEqual(
            __a ,
            __a ,
            f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" ,
        )

        # restore to the original level
        UpperCAmelCase_ = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def _lowercase (self : Optional[int] ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        UpperCAmelCase_ = logging.logging.getLogger()
        with CaptureLogger(__a ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )

        # no need to restore as nothing was changed

    def _lowercase (self : Dict ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        UpperCAmelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        UpperCAmelCase_ = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(__a ) as cl:
                logger.warning_advice(__a )
            self.assertEqual(cl.out , "" )

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(__a ) as cl:
                logger.warning_advice(__a )
            self.assertEqual(cl.out , msg + "\n" )


def lowerCAmelCase_ ( ) -> Optional[Any]:
    '''simple docstring'''
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
1
'''simple docstring'''


def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
28
0
'''simple docstring'''


def bfs(graph, s, t, parent) -> bool:
    """simple docstring"""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0

    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
2
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase = [] for i in range(A__ ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ ) UpperCamelCase = 1.0 - self.betas UpperCamelCase = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # setable values UpperCamelCase = None UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() ) UpperCamelCase = variance_type def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ): """simple docstring""" if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase = 
self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) ) UpperCamelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase = variance.log() UpperCamelCase = beta.log() UpperCamelCase = (predicted_variance + 1) / 2 UpperCamelCase = frac * max_log + (1 - frac) * min_log return variance def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] UpperCamelCase = self.alphas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase = torch.clamp( UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device ) UpperCamelCase = self._get_variance( UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , ) if self.variance_type == "fixed_small_log": UpperCamelCase = variance elif self.variance_type == "learned_range": UpperCamelCase = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) UpperCamelCase = variance * variance_noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ): """simple docstring""" UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase = timesteps.to(original_samples.device ) UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
28
0
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Tuple = logging.get_logger(__name__) lowercase : Union[str, Any] = { 'huggingface/informer-tourism-monthly': ( 'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json' ), # See all Informer models at https://huggingface.co/models?filter=informer } class A ( __snake_case ): __magic_name__ = '''informer''' __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "student_t" , SCREAMING_SNAKE_CASE = "nll" , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "mean" , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 64 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = "gelu" , SCREAMING_SNAKE_CASE = 0.05 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE = "prob" , SCREAMING_SNAKE_CASE = 5 , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Any = prediction_length A : Dict = context_length or prediction_length A : List[Any] = distribution_output A : List[str] = loss A : int = input_size A : List[Any] = num_time_features A : str = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] A : List[str] = scaling A : Any = num_dynamic_real_features A : str = num_static_real_features A : Optional[int] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) A : str = cardinality else: A : List[Any] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) A : int = embedding_dimension else: A : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] A : int = num_parallel_samples # Transformer architecture configuration A : str = input_size * len(self.lags_sequence ) + self._number_of_features A : Dict = d_model A : int = encoder_attention_heads A : Optional[Any] = decoder_attention_heads A : Union[str, Any] = encoder_ffn_dim A : int = decoder_ffn_dim A : Tuple = encoder_layers A : List[str] = decoder_layers A : Optional[int] = dropout A : List[Any] = attention_dropout A : List[Any] = activation_dropout A : int = encoder_layerdrop A : str = decoder_layerdrop A : Optional[Any] = activation_function A : List[str] = init_std A : List[str] = use_cache # Informer A : List[Any] = attention_type A : Tuple = sampling_factor A : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , 
**SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
3
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = num_stages UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = scope def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : List[str] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Optional[int] ): """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def A ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def A ( self : Optional[int] ): """simple docstring""" pass def A ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = 
[*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def A ( self : Dict ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Any: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : Optional[Any] ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ConvNextConfig _SCREAMING_SNAKE_CASE = False def A 
( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self )
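# The suite above is normally collected with pytest; an illustrative invocation
# (the test-file path is an assumption, not stated in the file itself):
#
#     python -m pytest tests/models/convnext/test_modeling_convnext.py -k "backbone or hidden_states"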
28
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __snake_case =logging.get_logger(__name__) def a_ ( lowerCamelCase : Any ): lowerCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' ) if "norm" in key: lowerCAmelCase = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' ) if "layer_norm1" in key: lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase = key[key.find('block' ) + len('block' )] lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' ) if "attn.q" in key: lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' ) if "bot_conv" in key: lowerCAmelCase = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: lowerCAmelCase = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: lowerCAmelCase = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: lowerCAmelCase = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: lowerCAmelCase = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' ) lowerCAmelCase = value return new_state_dict def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase = 
state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase = kv_bias[config.hidden_sizes[i] :] def a_ ( ): lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return image @torch.no_grad() def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ): lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCAmelCase = GLPNImageProcessor() # prepare image lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) ) # rename keys lowerCAmelCase = rename_keys(lowerCamelCase ) # key and value matrices need special treatment read_in_k_v(lowerCamelCase , lowerCamelCase ) # create HuggingFace model and load state dict lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # forward pass lowerCAmelCase = model(lowerCamelCase ) lowerCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase = torch.tensor( [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] ) elif "kitti" in model_name: lowerCAmelCase = torch.tensor( [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] ) else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , ) if __name__ == "__main__": __snake_case =argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) __snake_case =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
4
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of 
cross-attention to the state dict UpperCamelCase = in_proj_weight_cross_attn[:256, :] UpperCamelCase = in_proj_bias_cross_attn[:256] UpperCamelCase = in_proj_weight_cross_attn[256:512, :] UpperCamelCase = in_proj_bias_cross_attn[256:512] UpperCamelCase = in_proj_weight_cross_attn[-256:, :] UpperCamelCase = in_proj_bias_cross_attn[-256:] def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase = max(A__ , A__ ) UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000 UpperCamelCase = target_max_size / current_max_size UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = F.to_tensor(A__ ) UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" logger.info('Converting model...' ) # load original state dict UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) UpperCamelCase = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val # create HuggingFace model and load state dict UpperCamelCase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase = 15 UpperCamelCase = 2 UpperCamelCase = {0: 'table', 1: 'table rotated'} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} else: UpperCamelCase = 125 UpperCamelCase = 6 UpperCamelCase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 ) UpperCamelCase = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ ) UpperCamelCase = Image.open(A__ ).convert('RGB' ) UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) UpperCamelCase = model(A__ ) if "detection" in checkpoint_url: UpperCamelCase = (1, 15, 3) UpperCamelCase = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCamelCase = (1, 125, 7) UpperCamelCase = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) 
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) UpperCamelCase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : int = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
28
0
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of voltage, current or power is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
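# Worked examples for electric_power above (values follow directly from
# P = V * I; the calls are illustrative, not part of the original file):
if __name__ == "__main__":
    assert electric_power(voltage=0, current=2, power=5) == ("voltage", 2.5)
    assert electric_power(voltage=2, current=2, power=0) == ("power", 4.0)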
5
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # sample num_frames frame indices, spaced frame_sampling_rate apart
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
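# Usage sketch for the pipeline above. The checkpoint name and the video URL
# are illustrative assumptions, not part of the original file.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    predictions = classifier("https://example.com/clip.mp4", top_k=3, frame_sampling_rate=4)
    print(predictions)  # [{"score": ..., "label": ...}, ...]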
28
0
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    # replace every segment by four: keep the start point, add the two
    # one-third points with a 60-degree bump between them
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
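# Sanity-check sketch (not in the original file): each iteration step replaces
# every segment with four, so k steps on the initial triangle (3 segments)
# give 3 * 4**k segments, i.e. 3 * 4**k + 1 points in the vector list.
if __name__ == "__main__":
    for k in range(4):
        assert len(iterate(INITIAL_VECTORS, k)) == 3 * 4**k + 1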
6
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCamelCase : Optional[int] = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _lowerCamelCase : Union[str, Any] = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _lowerCamelCase : Optional[Any] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _lowerCamelCase : List[Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 
3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _lowerCamelCase : List[str] = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]: """simple docstring""" return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(A__ ) UpperCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __lowerCamelCase ( ) -> str: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __lowerCamelCase ( ) -> List[str]: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) ) UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' ) with open(A__ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ ) UpperCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
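# Illustrative sketch (not part of the test file above): comparing two hands
# directly with the PokerHand API that these tests exercise; the expected
# "Loss" comes from the expectations table near the top of the file.
if __name__ == "__main__":
    hand, other = PokerHand("2H 3H 4H 5H 6H"), PokerHand("KS AS TS QS JS")
    print(hand.compare_with(other))  # "Loss": a royal flush beats a six-high straight flush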
28
0
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
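# Usage sketch for the processor above (the checkpoint name and the blank
# image are illustrative assumptions, not part of the original file):
if __name__ == "__main__":
    from PIL import Image

    from transformers import BridgeTowerProcessor

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    inputs = processor(images=Image.new("RGB", (384, 384)), text="a photo", return_tensors="pt")
    print(sorted(inputs.keys()))  # tokenizer keys plus pixel_values (+ pixel_mask)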
7
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
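# Sampling-loop sketch for the scheduler above, mirroring the stochastic
# sampler of Karras et al. (2022) as driven by diffusers' KarrasVePipeline.
# `unet` and the tensor shape are illustrative assumptions, so this stays in
# comment form rather than executable code:
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = torch.randn(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         if sigma_prev != 0:  # second-order correction except at the last step
#             model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#             step_output = scheduler.step_correct(
#                 model_output, sigma_hat, sigma_prev, sample_hat,
#                 step_output.prev_sample, step_output.derivative,
#             )
#         sample = step_output.prev_sample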
28
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
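# How the lazy module behaves (an illustration, not part of the file): each
# submodule is imported only when one of its names is first accessed, e.g.
#
#     from transformers.models.swin import SwinConfig  # pulls in configuration_swin only
#     from transformers.models.swin import SwinModel   # now triggers the torch-backed modeling_swin import
#
# which keeps `import transformers` cheap when torch or TensorFlow is absent.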
8
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : List[str] =logging.get_logger(__name__) def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Any = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , ) __SCREAMING_SNAKE_CASE : Tuple = DetaConfig( backbone_config=lowercase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowercase__ , with_box_refine=lowercase__ , two_stage=lowercase__ , ) # set labels __SCREAMING_SNAKE_CASE : Tuple = '''huggingface/label-files''' if "o365" in model_name: __SCREAMING_SNAKE_CASE : Dict = 366 __SCREAMING_SNAKE_CASE : Any = '''object365-id2label.json''' else: __SCREAMING_SNAKE_CASE : List[str] = 91 __SCREAMING_SNAKE_CASE : List[Any] = '''coco-detection-id2label.json''' __SCREAMING_SNAKE_CASE : List[str] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='''dataset''' ) ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Dict = {int(lowercase__ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : Optional[Any] = idalabel __SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : int = [] # stem # fmt: off rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', 
F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') ) rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') ) rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') ) rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') ) rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') ) rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) 
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Tuple = dct.pop(lowercase__ ) __SCREAMING_SNAKE_CASE : Any = val def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) __SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE : Any = in_proj_weight[:dim, :] __SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[: dim] __SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = in_proj_bias[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[ -dim :, : ] __SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[-dim :] # fmt: on def _UpperCamelCase ( lowercase__ , lowercase__ ): # transformer decoder self-attention layers __SCREAMING_SNAKE_CASE : Any = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention __SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) __SCREAMING_SNAKE_CASE : Any = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE : Any = in_proj_weight[:hidden_size, :] __SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[:hidden_size] __SCREAMING_SNAKE_CASE : str = in_proj_weight[ hidden_size : hidden_size * 2, : ] __SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2] __SCREAMING_SNAKE_CASE : int = in_proj_weight[-hidden_size:, :] __SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-hidden_size:] def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Tuple = get_deta_config(lowercase__ ) # load original state dict if model_name == "deta-swin-large": __SCREAMING_SNAKE_CASE : Union[str, Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' ) elif model_name == "deta-swin-large-o365": __SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' ) else: raise ValueError(F'''Model name {model_name} not supported''' ) __SCREAMING_SNAKE_CASE : Dict = torch.load(lowercase__ , map_location='''cpu''' )['''model'''] # original state dict for name, param in state_dict.items(): print(lowercase__ , param.shape ) # rename keys 
__SCREAMING_SNAKE_CASE : Optional[int] = create_rename_keys(lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_swin_q_k_v(lowercase__ , config.backbone_config ) read_in_decoder_q_k_v(lowercase__ , lowercase__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: __SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = val if "input_proj" in key: __SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(lowercase__ ) __SCREAMING_SNAKE_CASE : str = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: __SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowercase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = val # finally, create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE : List[str] = DetaForObjectDetection(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' model.to(lowercase__ ) # load image processor __SCREAMING_SNAKE_CASE : Union[str, Any] = DetaImageProcessor(format='''coco_detection''' ) # verify our conversion on image __SCREAMING_SNAKE_CASE : Tuple = prepare_img() __SCREAMING_SNAKE_CASE : Optional[Any] = processor(images=lowercase__ , return_tensors='''pt''' ) __SCREAMING_SNAKE_CASE : Optional[int] = encoding['''pixel_values'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = model(pixel_values.to(lowercase__ ) ) # verify logits print('''Logits:''' , outputs.logits[0, :3, :3] ) print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": __SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowercase__ ) , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowercase__ ) , atol=1e-4 ) print('''Everything ok!''' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) # Push to hub if push_to_hub: print('''Pushing model and processor to hub...''' ) model.push_to_hub(F'''jozhang97/{model_name}''' ) processor.push_to_hub(F'''jozhang97/{model_name}''' ) if __name__ == "__main__": __lowerCAmelCase : str =argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' 
) __lowerCAmelCase : List[str] =parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
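# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The converter's core move is renaming checkpoint keys in place: pop the old
# key and reinsert the value under the new name. A minimal, dependency-free
# version of that pattern on a toy state dict (the key names below are
# hypothetical examples, not taken from a real checkpoint):
from collections import OrderedDict


def rename_state_dict_keys(state_dict, rename_pairs):
    # rename_pairs: iterable of (old_key, new_key) tuples, applied in order
    for old_key, new_key in rename_pairs:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict


toy_state = OrderedDict({"transformer.decoder.layers.0.norm3.weight": 1.0})
rename_state_dict_keys(
    toy_state,
    [("transformer.decoder.layers.0.norm3.weight", "decoder.layers.0.final_layer_norm.weight")],
)
assert "decoder.layers.0.final_layer_norm.weight" in toy_state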
9
'''simple docstring'''


def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
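# --- Illustrative cross-check (added; assumes the target is Project Euler 94:
# almost-equilateral triangles with sides (a, a, a±1), integer area, and
# perimeter at most the limit). Brute force via Heron's formula confirms the
# first terms produced by the recurrence above:
from math import isqrt


def brute_force_perimeter_sum(limit: int) -> int:
    total = 0
    for a in range(2, limit // 3 + 2):
        for b in (a - 1, a + 1):  # the third side differs from a by one
            p = 2 * a + b
            if p > limit:
                continue
            # Heron's formula: 16 * area^2 = p * (p - 2a)^2 * (p - 2b)
            sq = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * b)
            root = isqrt(sq)
            if root * root == sq and root % 4 == 0:
                total += p  # area = root / 4 is an integer, triangle counts
    return total


assert brute_force_perimeter_sum(1_000) == 984 == solution(1_000)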
28
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' @register_to_config def __init__(self : str , UpperCAmelCase_ : int = 65_536 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : str = "fourier" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Tuple[int] = (32, 32, 64) , UpperCAmelCase_ : str = None , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = False , ) ->Tuple: '''simple docstring''' super().__init__() lowerCamelCase__: Tuple =sample_size # time if time_embedding_type == "fourier": lowerCamelCase__: Optional[int] =GaussianFourierProjection( embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_) lowerCamelCase__: int =2 * block_out_channels[0] elif time_embedding_type == "positional": lowerCamelCase__: str =Timesteps( block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =block_out_channels[0] if use_timestep_embedding: lowerCamelCase__: List[Any] =block_out_channels[0] * 4 lowerCamelCase__: Tuple =TimestepEmbedding( in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , ) lowerCamelCase__: List[str] =nn.ModuleList([]) lowerCamelCase__: List[str] =None lowerCamelCase__: List[str] =nn.ModuleList([]) lowerCamelCase__: str =None # down lowerCamelCase__: Optional[Any] =in_channels for i, down_block_type in enumerate(UpperCAmelCase_): lowerCamelCase__: Optional[Any] =output_channel lowerCamelCase__: List[str] =block_out_channels[i] if i == 0: input_channel += extra_in_channels lowerCamelCase__: Optional[Any] =i == len(UpperCAmelCase_) - 1 lowerCamelCase__: int =get_down_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(UpperCAmelCase_) # mid lowerCamelCase__: Any =get_mid_block( UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , ) # up lowerCamelCase__: Optional[Any] =list(reversed(UpperCAmelCase_)) lowerCamelCase__: Optional[Any] =reversed_block_out_channels[0] if out_block_type is None: lowerCamelCase__: Tuple =out_channels else: lowerCamelCase__: Optional[int] =block_out_channels[0] for i, up_block_type in enumerate(UpperCAmelCase_): 
lowerCamelCase__: int =output_channel lowerCamelCase__: str =( reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_) - 1 else final_upsample_channels ) lowerCamelCase__: Union[str, Any] =i == len(UpperCAmelCase_) - 1 lowerCamelCase__: Any =get_up_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(UpperCAmelCase_) lowerCamelCase__: Optional[int] =output_channel # out lowerCamelCase__: Optional[Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) lowerCamelCase__: Union[str, Any] =get_out_block( out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : bool = True , ) ->Union[UNetaDOutput, Tuple]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =timestep if not torch.is_tensor(UpperCAmelCase_): lowerCamelCase__: Any =torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(UpperCAmelCase_) and len(timesteps.shape) == 0: lowerCamelCase__: Optional[Any] =timesteps[None].to(sample.device) lowerCamelCase__: Union[str, Any] =self.time_proj(UpperCAmelCase_) if self.config.use_timestep_embedding: lowerCamelCase__: Dict =self.time_mlp(UpperCAmelCase_) else: lowerCamelCase__: Optional[Any] =timestep_embed[..., None] lowerCamelCase__: Union[str, Any] =timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) lowerCamelCase__: List[str] =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down lowerCamelCase__: List[str] =() for downsample_block in self.down_blocks: lowerCamelCase__ , lowerCamelCase__: Any =downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_) down_block_res_samples += res_samples # 3. mid if self.mid_block: lowerCamelCase__: int =self.mid_block(UpperCAmelCase_ , UpperCAmelCase_) # 4. up for i, upsample_block in enumerate(self.up_blocks): lowerCamelCase__: Union[str, Any] =down_block_res_samples[-1:] lowerCamelCase__: Optional[int] =down_block_res_samples[:-1] lowerCamelCase__: List[str] =upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_) # 5. post-process if self.out_block: lowerCamelCase__: List[Any] =self.out_block(UpperCAmelCase_ , UpperCAmelCase_) if not return_dict: return (sample,) return UNetaDOutput(sample=UpperCAmelCase_)
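# --- Illustrative sketch (added): the forward pass above broadcasts a
# per-sample timestep embedding of shape (B, C) across the sequence axis so it
# can be combined with a 1D sample of shape (B, C, L). Plain PyTorch:
import torch

batch, channels, length = 2, 32, 16
timestep_embed = torch.randn(batch, channels)
expanded = timestep_embed[..., None].repeat(1, 1, length)  # -> (B, C, L)
assert expanded.shape == (batch, channels, length)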
10
'''simple docstring'''
import math


class Graph:
    """simple docstring"""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        """simple docstring"""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """simple docstring"""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """simple docstring"""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
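# --- Illustrative check (added): a 3-node line graph 0 -> 1 -> 2 should relax
# dist(0, 2) to w(0, 1) + w(1, 2) once the intermediate node k = 1 is
# considered. (In a real module this would live under the __main__ guard.)
_g = Graph(3)
_g.add_edge(0, 1, 2)
_g.add_edge(1, 2, 3)
_g.floyd_warshall()
assert _g.show_min(0, 2) == 5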
28
0
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'nielsr/canine-s': 20_48, } # Unicode defines 1,114,112 total “codepoints” lowerCAmelCase__ = 1_11_41_12 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowerCAmelCase__ = 0 lowerCAmelCase__ = 0xE000 lowerCAmelCase__ = 0xE001 lowerCAmelCase__ = 0xE002 lowerCAmelCase__ = 0xE003 lowerCAmelCase__ = 0xE004 # Maps special codepoints to human-readable names. lowerCAmelCase__ = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. lowerCAmelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=chr(__lowerCamelCase) , __lowerCamelCase=False , __lowerCamelCase=2_0_4_8 , **__lowerCamelCase , ) -> Optional[Any]: _A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else bos_token _A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else eos_token _A : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else sep_token _A : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else cls_token _A : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else pad_token # Mask token behave like a normal word, i.e. include the space before it _A : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , model_max_length=__lowerCamelCase , **__lowerCamelCase , ) # Creates a mapping for looking up the IDs of special symbols. _A : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): _A : Union[str, Any] = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
_A : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } _A : Dict = UNICODE_VOCAB_SIZE _A : Optional[int] = len(self._special_codepoints) @property def _lowerCamelCase ( self) -> int: return self._unicode_vocab_size def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: return list(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> int: try: return ord(__lowerCamelCase) except TypeError: raise ValueError(F"invalid token: '{token}'") def _lowerCamelCase ( self , __lowerCamelCase) -> str: try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__lowerCamelCase) except TypeError: raise ValueError(F"invalid id: {index}") def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[int]: return "".join(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : int = [self.cls_token_id] _A : Any = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase) _A : Optional[Any] = [1] + ([0] * len(__lowerCamelCase)) + [1] if token_ids_a is not None: result += ([0] * len(__lowerCamelCase)) + [1] return result def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Optional[Any] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] _A : Tuple = len(cls + token_ids_a + sep) * [0] if token_ids_a is not None: result += len(token_ids_a + sep) * [1] return result def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Union[str, Any]: return ()
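# --- Illustrative sketch (added): CANINE tokenizes at the Unicode codepoint
# level, so outside the special symbols the round trip is just ord()/chr():
sample_text = "héllo"
codepoint_ids = [ord(ch) for ch in sample_text]
assert "".join(chr(i) for i in codepoint_ids) == sample_text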
11
'''simple docstring'''
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
28
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase__( unittest.TestCase): def __init__( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any]=7 , UpperCamelCase_: int=3 , UpperCamelCase_: str=18 , UpperCamelCase_: List[Any]=30 , UpperCamelCase_: Tuple=4_00 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=None , UpperCamelCase_: Dict=True , ): __lowerCamelCase = size if size is not None else {"""height""": 18, """width""": 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = apply_ocr def lowerCAmelCase__ ( self: int ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase__ ( self: Any ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """apply_ocr""" ) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCAmelCase__ ( self: Union[str, Any] ): pass def lowerCAmelCase__ ( self: Optional[int] ): # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase_ ) self.assertIsInstance(encoding.boxes , UpperCamelCase_ ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: Tuple ): # Initialize 
image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: List[str] ): # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: Union[str, Any] ): # with apply_OCR = True __lowerCamelCase = LayoutLMvaImageProcessor() from datasets import load_dataset __lowerCamelCase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) __lowerCamelCase = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __lowerCamelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", 
"""with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 __lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 
4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase_ ) self.assertListEqual(encoding.boxes , UpperCamelCase_ ) # with apply_OCR = False __lowerCamelCase = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ ) __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
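# --- Illustrative sketch (added): the assertions above expect pixel values of
# shape (1, 3, 224, 224). The resize-and-transpose step they exercise, written
# directly with PIL + NumPy (a hypothetical stand-in, not the processor class):
import numpy as np
from PIL import Image

toy_image = Image.new("RGB", (640, 480))
resized = np.array(toy_image.resize((224, 224)))      # (224, 224, 3), HWC
pixel_values = resized.transpose(2, 0, 1)[None, ...]  # (1, 3, 224, 224), NCHW
assert pixel_values.shape == (1, 3, 224, 224)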
12
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
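# --- Illustrative sketch (added): _LazyModule defers heavy imports until
# attribute access. A stripped-down analogue using PEP 562 module-level
# __getattr__ (illustrative only; the transformers implementation is more
# involved):
import importlib

_lazy_targets = {"sqrt": "math"}  # attribute name -> module that provides it


def __getattr__(name):
    if name in _lazy_targets:
        return getattr(importlib.import_module(_lazy_targets[name]), name)
    raise AttributeError(name)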
28
0
from sklearn.metrics import recall_score import datasets lowerCAmelCase : Optional[int] = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : int = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : Tuple = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[str]): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), }) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : List[str]="binary" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str="warn" , ): SCREAMING_SNAKE_CASE_: List[str] = recall_score( lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ , pos_label=lowerCAmelCase__ , average=lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , zero_division=lowerCAmelCase__ , ) return {"recall": float(lowerCAmelCase__) if score.size == 1 else score}
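# --- Illustrative check (added): recall = TP / (TP + FN). Recomputing
# Example 1 from the docstring above by hand, without sklearn:
_refs = [0, 0, 1, 1, 1]
_preds = [0, 1, 0, 1, 1]
_tp = sum(1 for r, p in zip(_refs, _preds) if r == 1 and p == 1)  # 2
_fn = sum(1 for r, p in zip(_refs, _preds) if r == 1 and p == 0)  # 1
assert abs(_tp / (_tp + _fn) - 0.6666666666666666) < 1e-12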
13
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict: """simple docstring""" UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T return jnp.matmul(A__ , norm_emb_a.T ) class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = jnp.floataa def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config ) UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype ) UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) UpperCamelCase = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) ) UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) ) def __call__( self : str , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.vision_model(UpperCamelCase__ )[1] UpperCamelCase = self.visual_projection(UpperCamelCase__ ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCamelCase = 0.0 UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ ) # Use a lower threshold if an image has any special care concept UpperCamelCase = is_special_care * 0.0_1 UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = CLIPConfig _SCREAMING_SNAKE_CASE = """clip_input""" _SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ): """simple docstring""" if input_shape is None: UpperCamelCase = (1, 2_2_4, 2_2_4, 3) UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ ) super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init ) def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ): """simple docstring""" UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ ) UpperCamelCase = {'params': 
params_rng, 'dropout': dropout_rng} UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params'] return random_params def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ): """simple docstring""" UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
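# --- Illustrative sketch (added): despite its "distance" name, the helper at
# the top of this file computes cosine *similarity* -- a row-normalized dot
# product. The same computation in NumPy:
import numpy as np


def cosine_similarity(emb_a, emb_b, eps=1e-12):
    a = emb_a / np.clip(np.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    b = emb_b / np.clip(np.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return a @ b.T


_x = np.array([[1.0, 0.0], [0.0, 2.0]])
assert np.allclose(cosine_similarity(_x, _x), np.eye(2))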
28
0
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase : Union[str, Any] = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowerCamelCase : str = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mask2former''' UpperCAmelCase__ = ['''swin'''] UpperCAmelCase__ = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Dict] = None , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : int = 1_024 , UpperCAmelCase__ : str = "relu" , UpperCAmelCase__ : int = 6 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 2_048 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 255 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 2.0 , UpperCAmelCase__ : float = 5.0 , UpperCAmelCase__ : float = 5.0 , UpperCAmelCase__ : int = 12_544 , UpperCAmelCase__ : float = 3.0 , UpperCAmelCase__ : float = 0.75 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : List[int] = [4, 8, 16, 32] , UpperCAmelCase__ : bool = None , **UpperCAmelCase__ : Union[str, Any] , ) ->Optional[Any]: '''simple docstring''' if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''') A__ = CONFIG_MAPPING['''swin''']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCAmelCase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = backbone_config.pop('''model_type''') A__ = CONFIG_MAPPING[backbone_model_type] A__ = config_class.from_dict(UpperCAmelCase__) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported)}""") A__ = backbone_config A__ = feature_size A__ = mask_feature_size A__ = hidden_dim A__ = encoder_feedforward_dim A__ = activation_function A__ = encoder_layers A__ = decoder_layers A__ = num_attention_heads A__ = dropout A__ = dim_feedforward A__ = pre_norm A__ = enforce_input_projection A__ = common_stride A__ = ignore_value A__ = num_queries A__ = no_object_weight A__ = class_weight A__ = mask_weight A__ = dice_weight A__ = train_num_points A__ = oversample_ratio A__ = importance_sample_ratio A__ = init_std A__ = init_xavier_std A__ = use_auxiliary_loss A__ = feature_strides A__ = output_auxiliary_logits A__ = decoder_layers super().__init__(**UpperCAmelCase__) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : List[Any]) ->str: '''simple docstring''' return cls( backbone_config=UpperCAmelCase__ , **UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict[str, any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.backbone_config.to_dict() A__ = self.__class__.model_type return output
14
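# --- Illustrative sketch (added): the Mask2Former config above round-trips
# through to_dict()/from_dict(), with the nested backbone config serialized
# alongside a "model_type" tag. The same pattern in miniature (toy class,
# hypothetical field names):
class ToyConfig:
    model_type = "toy"

    def __init__(self, hidden_dim=256, num_queries=100):
        self.hidden_dim = hidden_dim
        self.num_queries = num_queries

    def to_dict(self):
        return {
            "model_type": self.model_type,
            "hidden_dim": self.hidden_dim,
            "num_queries": self.num_queries,
        }

    @classmethod
    def from_dict(cls, config_dict):
        config_dict = dict(config_dict)
        config_dict.pop("model_type", None)  # tag is metadata, not an __init__ arg
        return cls(**config_dict)


assert ToyConfig.from_dict(ToyConfig(hidden_dim=128).to_dict()).hidden_dim == 128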
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


_lowerCamelCase : str = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
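# --- Illustrative sketch (added): the shim above is the standard
# warn-and-delegate deprecation pattern. In miniature, with hypothetical names:
class NewProcessor:
    pass


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)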
28
0
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg)
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''').json()


def summarize_book(ol_book_data: dict) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
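# --- Illustrative check (added): the olid validation above accepts exactly one
# "/" after stripping; a quick offline demonstration, no network access needed:
for _good in ("isbn/0140328726", "/isbn/0140328726/"):
    assert _good.strip().strip("/").count("/") == 1
assert "isbn//0140328726".strip().strip("/").count("/") == 2  # would raise ValueError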
15
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCamelCase : Optional[int] = logging.getLogger(__name__) def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int: """simple docstring""" def get_dataset(A__ ): UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int: """simple docstring""" UpperCamelCase = [] for epoch in range(A__ ): # Train quickly model.train() for batch in dataloader: UpperCamelCase , UpperCamelCase = batch UpperCamelCase = model(A__ ) UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ ) accelerator.backward(A__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): """simple docstring""" super().__init__() UpperCamelCase = nn.Parameter(torch.randn(1 ) ) UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A ( self : str , UpperCamelCase__ : Dict ): """simple docstring""" return x * self.a + self.b class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A ( self : Optional[int] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() # Train baseline UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' ) accelerator.save_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , 
(UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save everything UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' ) accelerator.save_state(UpperCamelCase__ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase__ ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ ) UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch.tensor([1, 2, 3] ) UpperCamelCase = torch.tensor([2, 3, 4] ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(net.parameters() ) UpperCamelCase = Accelerator() with self.assertRaises(UpperCamelCase__ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def A ( self : Dict ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() UpperCamelCase = scheduler.state_dict() train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(UpperCamelCase__ , scheduler.state_dict() ) def A ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase = accelerator.prepare(UpperCamelCase__ ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def A ( self : Dict ): """simple docstring""" UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": 
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing" _lowerCamelCase : Union[str, Any] = DummyModel() _lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3) _lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders() _lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCamelCase : Any = group["params"][0].device break assert param_device.type == accelerator.device.type _lowerCamelCase : Tuple = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: _lowerCamelCase : Optional[Any] = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: _lowerCamelCase : Dict = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
28
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] lowerCAmelCase_ = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } lowerCAmelCase_ = {F'''funnel-transformer/{name}''': 512 for name in _model_names} lowerCAmelCase_ = {F'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names} class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : int = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase : Dict = FunnelTokenizer lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : int = 2 def __init__( self : Optional[int] ,_snake_case : Dict=None ,_snake_case : List[str]=None ,_snake_case : Optional[Any]=True ,_snake_case : 
Optional[int]="<unk>" ,_snake_case : Dict="<sep>" ,_snake_case : Any="<pad>" ,_snake_case : str="<cls>" ,_snake_case : Optional[Any]="<mask>" ,_snake_case : int="<s>" ,_snake_case : Dict="</s>" ,_snake_case : Optional[int]=True ,_snake_case : List[str]=True ,_snake_case : Dict=None ,_snake_case : str="##" ,**_snake_case : Optional[Any] ,) -> Any: """simple docstring""" super().__init__( _snake_case ,tokenizer_file=_snake_case ,do_lower_case=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,clean_text=_snake_case ,tokenize_chinese_chars=_snake_case ,strip_accents=_snake_case ,wordpieces_prefix=_snake_case ,**_snake_case ,) lowercase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' ,_snake_case ) != do_lower_case or normalizer_state.get('''strip_accents''' ,_snake_case ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' ,_snake_case ) != tokenize_chinese_chars ): lowercase__ : List[str] = getattr(_snake_case ,normalizer_state.pop('''type''' ) ) lowercase__ : List[str] = do_lower_case lowercase__ : Any = strip_accents lowercase__ : Union[str, Any] = tokenize_chinese_chars lowercase__ : Union[str, Any] = normalizer_class(**_snake_case ) lowercase__ : List[str] = do_lower_case def UpperCAmelCase ( self : int ,_snake_case : Tuple ,_snake_case : Tuple=None ) -> Optional[Any]: """simple docstring""" lowercase__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase ( self : Tuple ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : Optional[int] = [self.sep_token_id] lowercase__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase ( self : Tuple ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowercase__ : str = self._tokenizer.model.save(_snake_case ,name=_snake_case ) return tuple(_snake_case )
16
'''simple docstring'''

import json
import os
import tempfile

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
28
0
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def _A ( UpperCamelCase_ : Iterable[str], UpperCamelCase_ : int) -> Generator[tuple[str, ...], None, None]: '''simple docstring''' __lowercase = iter(UpperCamelCase_) while True: __lowercase = tuple(itertools.islice(UpperCamelCase_, UpperCamelCase_)) if not chunk: return yield chunk def _A ( UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = "".join([c.upper() for c in dirty if c in string.ascii_letters]) __lowercase = "" if len(UpperCamelCase_) < 2: return dirty for i in range(len(UpperCamelCase_) - 1): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(UpperCamelCase_) & 1: clean += "X" return clean def _A ( UpperCamelCase_ : str) -> list[str]: '''simple docstring''' __lowercase = "ABCDEFGHIKLMNOPQRSTUVWXYZ" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler __lowercase = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(UpperCamelCase_) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(UpperCamelCase_) return table def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = generate_table(UpperCamelCase_) __lowercase = prepare_input(UpperCamelCase_) __lowercase = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(UpperCamelCase_, 2): __lowercase ,__lowercase = divmod(table.index(UpperCamelCase_), 5) __lowercase ,__lowercase = divmod(table.index(UpperCamelCase_), 5) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = generate_table(UpperCamelCase_) __lowercase = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(UpperCamelCase_, 2): __lowercase ,__lowercase = divmod(table.index(UpperCamelCase_), 5) __lowercase ,__lowercase = divmod(table.index(UpperCamelCase_), 5) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
17
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ): """simple docstring""" if rouge_types is None: UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: UpperCamelCase = scoring.BootstrapAggregator() else: UpperCamelCase = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: UpperCamelCase = aggregator.aggregate() else: UpperCamelCase = {} for key in scores[0]: UpperCamelCase = [score[key] for score in scores] return result
28
0
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """simple docstring"""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
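# --- Editorial sanity check, not part of the original file ---
# The Project Euler 191 problem statement gives 43 valid "prize" strings
# over a 4-day period, which the memoized recursion above should reproduce.
assert solution(4) == 43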
18
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """simple docstring"""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
28
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A ={ '''configuration_mobilebert''': [ '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileBertConfig''', '''MobileBertOnnxConfig''', ], '''tokenization_mobilebert''': ['''MobileBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =['''MobileBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ '''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileBertForMaskedLM''', '''MobileBertForMultipleChoice''', '''MobileBertForNextSentencePrediction''', '''MobileBertForPreTraining''', '''MobileBertForQuestionAnswering''', '''MobileBertForSequenceClassification''', '''MobileBertForTokenClassification''', '''MobileBertLayer''', '''MobileBertModel''', '''MobileBertPreTrainedModel''', '''load_tf_weights_in_mobilebert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ '''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileBertForMaskedLM''', '''TFMobileBertForMultipleChoice''', '''TFMobileBertForNextSentencePrediction''', '''TFMobileBertForPreTraining''', '''TFMobileBertForQuestionAnswering''', '''TFMobileBertForSequenceClassification''', '''TFMobileBertForTokenClassification''', '''TFMobileBertMainLayer''', '''TFMobileBertModel''', '''TFMobileBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
19
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
28
0
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
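# --- Editorial usage sketch, not part of the original module ---
# The class above is normally reached through the high-level `pipeline`
# factory. The image path is an illustrative placeholder; the task's default
# checkpoint is downloaded on first use.
from transformers import pipeline

classifier = pipeline("image-classification")
for pred in classifier("path/to/image.png", top_k=3):
    print(f"{pred['label']}: {pred['score']:.3f}")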
20
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : int ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[int] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def A 
( self : Any ): """simple docstring""" UpperCamelCase = self.get_config() UpperCamelCase = 3_0_0 return config def A ( self : Tuple ): """simple docstring""" ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = MraModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ): """simple docstring""" UpperCamelCase = True UpperCamelCase = MraModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = () def A ( self : str ): """simple docstring""" UpperCamelCase = MraModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : str ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : str ): 
"""simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def A ( self : List[Any] ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : List[str] ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
28
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase( _a ): lowercase_ : Dict = ["""image_processor""", """tokenizer"""] lowercase_ : Optional[Any] = """FlavaImageProcessor""" lowercase_ : List[str] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowerCamelCase, ) _lowercase : List[Any] = kwargs.pop('feature_extractor') _lowercase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(lowerCamelCase, lowerCamelCase) _lowercase : int = self.image_processor def __call__( self, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = 0, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = True, lowerCamelCase = None, **lowerCamelCase, ) -> Dict: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. 
Both cannot be none.') if text is not None: _lowercase : Optional[int] = self.tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) if images is not None: _lowercase : str = self.image_processor( lowerCamelCase, return_image_mask=lowerCamelCase, return_codebook_pixels=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) if text is not None and images is not None: encoding.update(lowerCamelCase) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase), tensor_type=lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> str: """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> List[str]: """simple docstring""" return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase) @property def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Dict = self.tokenizer.model_input_names _lowercase : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCamelCase, ) return self.image_processor_class @property def UpperCamelCase ( self) -> int: """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCamelCase, ) return self.image_processor
21
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowerCamelCase : Union[str, Any] = "\\n\n" _lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" _lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Tuple ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int = 1_6 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[Any]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCamelCase = 'cuda' else: UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCamelCase = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) UpperCamelCase = model.to(UpperCamelCase__ ) UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCamelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCamelCase = model.config.max_length - 1 else: UpperCamelCase = model.config.max_length UpperCamelCase = tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='pt' , return_attention_mask=UpperCamelCase__ , ).to(UpperCamelCase__ ) UpperCamelCase = encodings['input_ids'] UpperCamelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCamelCase = [] UpperCamelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ) ): UpperCamelCase = min(start_index + batch_size , len(UpperCamelCase__ ) ) UpperCamelCase = encoded_texts[start_index:end_index] UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase__ ) UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCamelCase__ ), attn_mask] , dim=1 ) UpperCamelCase = encoded_batch with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).logits UpperCamelCase = out_logits[..., :-1, :].contiguous() UpperCamelCase = labels[..., 1:].contiguous() UpperCamelCase = attn_mask[..., 1:].contiguous() UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase__ )}
28
0
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
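# --- Editorial migration sketch, not part of the original shim ---
# Prefer the image processor named in the deprecation warning directly;
# the checkpoint name below is illustrative.
from transformers import LayoutLMv2ImageProcessor

image_processor = LayoutLMv2ImageProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")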
22
'''simple docstring'''


def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
28
0
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def A ( self : Dict ) -> Dict: torch.manual_seed(0 ) UpperCAmelCase : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) UpperCAmelCase : Any = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) UpperCAmelCase : str = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) UpperCAmelCase : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase : Optional[int] = CLIPTextModel(__snake_case ) UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase : Tuple = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any]=0 ) -> Optional[Any]: if str(__snake_case ).startswith('''mps''' ): UpperCAmelCase : List[Any] = torch.manual_seed(__snake_case ) else: UpperCAmelCase : Dict = 
torch.Generator(device=__snake_case ).manual_seed(__snake_case ) UpperCAmelCase : str = 2 UpperCAmelCase : Union[str, Any] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ) UpperCAmelCase : Dict = floats_tensor(control_image.shape , rng=random.Random(__snake_case ) ).to(__snake_case ) UpperCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((64, 64) ) UpperCAmelCase : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def A ( self : List[Any] ) -> str: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def A ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def A ( self : Any ) -> Tuple: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def A ( self : List[str] ) -> Union[str, Any]: torch.manual_seed(0 ) UpperCAmelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__snake_case : List[str] ): if isinstance(__snake_case , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) UpperCAmelCase : Optional[int] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) UpperCAmelCase : List[str] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__snake_case ) torch.manual_seed(0 ) UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) UpperCAmelCase : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , 
layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase : Any = CLIPTextModel(__snake_case ) UpperCAmelCase : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase : Union[str, Any] = MultiControlNetModel([controlneta, controlneta] ) UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : Union[str, Any] , __snake_case : Any , __snake_case : Optional[int]=0 ) -> Optional[Any]: if str(__snake_case ).startswith('''mps''' ): UpperCAmelCase : int = torch.manual_seed(__snake_case ) else: UpperCAmelCase : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) UpperCAmelCase : Optional[Any] = 2 UpperCAmelCase : Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ), ] UpperCAmelCase : int = floats_tensor(control_image[0].shape , rng=random.Random(__snake_case ) ).to(__snake_case ) UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : Optional[Any] = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((64, 64) ) UpperCAmelCase : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def A ( self : Any ) -> Optional[Any]: UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) UpperCAmelCase : Optional[Any] = 10.0 UpperCAmelCase : Any = 4 UpperCAmelCase : Dict = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : int = steps UpperCAmelCase : List[str] = scale UpperCAmelCase : Union[str, Any] = pipe(**__snake_case )[0] UpperCAmelCase : List[str] = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : int = steps UpperCAmelCase : List[Any] = scale UpperCAmelCase : str = pipe(**__snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] UpperCAmelCase : str = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : List[Any] = steps UpperCAmelCase : Union[str, Any] = scale UpperCAmelCase : int = pipe(**__snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] UpperCAmelCase : Tuple = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : Optional[int] = steps UpperCAmelCase : Dict = scale UpperCAmelCase : Dict = pipe(**__snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def A ( self : int ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def A ( self : 
Tuple ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def A ( self : Optional[Any] ) -> str: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def A ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase : List[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**__snake_case ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : Optional[int] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : str ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) UpperCAmelCase : int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case , controlnet=__snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : str = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase : str = '''evil space-punk bird''' UpperCAmelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) UpperCAmelCase : Dict = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) UpperCAmelCase : Optional[int] = pipe( __snake_case , __snake_case , control_image=__snake_case , generator=__snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) UpperCAmelCase : Tuple = output.images[0] assert image.shape == (512, 512, 3) UpperCAmelCase : str = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9E-2
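# For reference, the slow test above maps onto the following standalone usage
# of the pipeline (spelled StableDiffusionControlNetImg2ImgPipeline in current
# diffusers releases). A minimal sketch, assuming the canny map drives the
# controlnet and the bird photo is the img2img input, as in the upstream test:
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()  # stream weights to the GPU on demand to cap memory

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

result = pipe(
    "evil space-punk bird",
    image,
    control_image=control_image,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
result.save("bird_punk.png")  # output filename is arbitrary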
'''simple docstring'''


def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable sequence in place with insertion sort, locating each
    insertion point by binary search instead of a linear scan."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the slot where `val` belongs in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1

        # shift the larger elements one slot right, then insert `val`
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
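# A small self-check for the sort above: compare against Python's built-in
# sorted() on random inputs. This harness is illustrative and not part of the
# original script.
import random

if __name__ == "__main__":
    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert binary_insertion_sort(list(data)) == sorted(data)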
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split the fused qkv matrix into query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
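# A tiny illustration of what rename_state_dict does to checkpoint keys: the
# sequential/projection indices are folded into the HF layer layout and a fused
# qkv weight is split three ways. The keys and shapes below are made up for the
# demo; real CLAP checkpoints contain many more entries.
import torch

demo_state_dict = {
    "text_branch.sequential.3.weight": torch.zeros(4, 4),
    "audio_branch.layers.0.attn.qkv.weight": torch.zeros(9, 3),
}
renamed = rename_state_dict(demo_state_dict)
# "text_branch" -> "text_model", and sequential.3 -> layers.1.linear (3 // 3 == 1)
assert "text_model.layers.1.linear.weight" in renamed
# the 9-row fused qkv weight is split into three 3-row tensors
assert renamed["audio_model.audio_encoder.layers.0.attention.self.query.weight"].shape == (3, 3)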
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase = [] for i in range(A__ ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ ) UpperCamelCase = 1.0 - self.betas UpperCamelCase = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # setable values UpperCamelCase = None UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() ) UpperCamelCase = variance_type def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ): """simple docstring""" if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase = 
self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) ) UpperCamelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase = variance.log() UpperCamelCase = beta.log() UpperCamelCase = (predicted_variance + 1) / 2 UpperCamelCase = frac * max_log + (1 - frac) * min_log return variance def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] UpperCamelCase = self.alphas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase = torch.clamp( UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device ) UpperCamelCase = self._get_variance( UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , ) if self.variance_type == "fixed_small_log": UpperCamelCase = variance elif self.variance_type == "learned_range": UpperCamelCase = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) UpperCamelCase = variance * variance_noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ): """simple docstring""" UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase = timesteps.to(original_samples.device ) UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
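# The add_noise method above implements the standard DDPM forward process,
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A minimal
# numeric sketch of that formula, independent of the scheduler class; the toy
# linear beta schedule is an assumption for illustration only (the scheduler
# itself uses the squaredcos_cap_v2 schedule built by betas_for_alpha_bar).
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(1, 3, 8, 8)   # clean sample
noise = torch.randn_like(x0)   # eps ~ N(0, I)
t = 500
x_t = alphas_cumprod[t] ** 0.5 * x0 + (1 - alphas_cumprod[t]) ** 0.5 * noise
# at late timesteps alpha_bar_t is small, so x_t is dominated by the noise term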
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ) -> Optional[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = only_cross_attention SCREAMING_SNAKE_CASE__ : List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" SCREAMING_SNAKE_CASE__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE__ : int = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Tuple = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = Attention( query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. SCREAMING_SNAKE_CASE__ : Any = ( AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Tuple = Attention( query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : str = None # 3. 
Feed-forward SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ ) # let chunk size default to None SCREAMING_SNAKE_CASE__ : Tuple = None SCREAMING_SNAKE_CASE__ : Dict = 0 def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = chunk_size SCREAMING_SNAKE_CASE__ : str = dim def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ) -> Any: """simple docstring""" if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE__ : Any = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.norma( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.norma(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} SCREAMING_SNAKE_CASE__ : List[Any] = self.attna( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : List[str] = gate_msa.unsqueeze(1 ) * attn_output SCREAMING_SNAKE_CASE__ : Dict = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: SCREAMING_SNAKE_CASE__ : str = ( self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : List[str] = self.attna( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = attn_output + hidden_states # 3. Feed-forward SCREAMING_SNAKE_CASE__ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) SCREAMING_SNAKE_CASE__ : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size SCREAMING_SNAKE_CASE__ : int = torch.cat( [self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: SCREAMING_SNAKE_CASE__ : int = self.ff(SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Tuple = gate_mlp.unsqueeze(1 ) * ff_output SCREAMING_SNAKE_CASE__ : Optional[Any] = ff_output + hidden_states return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Optional[int] = int(dim * mult ) SCREAMING_SNAKE_CASE__ : Tuple = dim_out if dim_out is not None else dim if activation_fn == "gelu": SCREAMING_SNAKE_CASE__ : str = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if activation_fn == "gelu-approximate": SCREAMING_SNAKE_CASE__ : List[str] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate="""tanh""" ) elif activation_fn == "geglu": SCREAMING_SNAKE_CASE__ : Optional[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif activation_fn == "geglu-approximate": SCREAMING_SNAKE_CASE__ : int = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = nn.ModuleList([] ) # project in self.net.append(SCREAMING_SNAKE_CASE__ ) # project dropout self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) ) # project out self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" for module in self.net: SCREAMING_SNAKE_CASE__ : Dict = module(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ) -> List[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = approximate def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.proj(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.gelu(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ ) class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.proj(SCREAMING_SNAKE_CASE__ ) return x * torch.sigmoid(1.702 * x ) class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = nn.SiLU() SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 ) SCREAMING_SNAKE_CASE__ : List[str] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 ) SCREAMING_SNAKE_CASE__ : Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift return x class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Any = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.SiLU() SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = emb.chunk(6 , dim=1 ) SCREAMING_SNAKE_CASE__ : Tuple = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = num_groups SCREAMING_SNAKE_CASE__ : Any = eps if act_fn is None: SCREAMING_SNAKE_CASE__ : Any = None else: SCREAMING_SNAKE_CASE__ : Dict = get_activation(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" if self.act: SCREAMING_SNAKE_CASE__ : Tuple = self.act(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = self.linear(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = emb[:, :, None, None] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = emb.chunk(2 , dim=1 ) SCREAMING_SNAKE_CASE__ : Dict = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = x * (1 + scale) + shift return x
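# "feed_forward_chunk_size" in the transformer block above trades peak memory
# for extra kernel launches: the feed-forward is applied to slices along one
# dimension and the results are concatenated. The equivalence that makes this
# safe is sketched below with a plain nn.Linear standing in for the FeedForward
# module (both act position-wise, so chunking over the sequence is exact).
import torch
from torch import nn

ff = nn.Linear(32, 32)
hidden_states = torch.randn(2, 16, 32)  # (batch, seq, dim)

chunk_size, chunk_dim = 4, 1
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked = torch.cat(
    [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
    dim=chunk_dim,
)
assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)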
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = num_stages UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = scope def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : List[str] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Optional[int] ): """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def A ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def A ( self : Optional[int] ): """simple docstring""" pass def A ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = 
[*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def A ( self : Dict ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Any: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : Optional[Any] ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ConvNextConfig _SCREAMING_SNAKE_CASE = False def A 
( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self )
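# The integration test above pins down the expected logits for
# facebook/convnext-tiny-224. Outside the test harness, the same forward pass
# looks like this; the cat image path is the fixture the test suite uses.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])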
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
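# A quick look at the derived attribute: the config computes num_hidden_layers
# as sum(num_block_repeats) * 4, where the factor of four is taken directly
# from the code above rather than being an independent fact about the
# architecture. The defaults describe EfficientNet-B7 (600px input, width 2.0,
# depth 3.1), and instantiating the config requires no weights.
from transformers import EfficientNetConfig

config = EfficientNetConfig()
assert config.image_size == 600
assert sum(config.num_block_repeats) == 16  # 1 + 2 + 2 + 3 + 3 + 4 + 1
assert config.num_hidden_layers == 64       # 16 * 4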
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of 
cross-attention to the state dict UpperCamelCase = in_proj_weight_cross_attn[:256, :] UpperCamelCase = in_proj_bias_cross_attn[:256] UpperCamelCase = in_proj_weight_cross_attn[256:512, :] UpperCamelCase = in_proj_bias_cross_attn[256:512] UpperCamelCase = in_proj_weight_cross_attn[-256:, :] UpperCamelCase = in_proj_bias_cross_attn[-256:] def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase = max(A__ , A__ ) UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000 UpperCamelCase = target_max_size / current_max_size UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = F.to_tensor(A__ ) UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" logger.info('Converting model...' ) # load original state dict UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) UpperCamelCase = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val # create HuggingFace model and load state dict UpperCamelCase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase = 15 UpperCamelCase = 2 UpperCamelCase = {0: 'table', 1: 'table rotated'} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} else: UpperCamelCase = 125 UpperCamelCase = 6 UpperCamelCase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 ) UpperCamelCase = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ ) UpperCamelCase = Image.open(A__ ).convert('RGB' ) UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) UpperCamelCase = model(A__ ) if "detection" in checkpoint_url: UpperCamelCase = (1, 15, 3) UpperCamelCase = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCamelCase = (1, 125, 7) UpperCamelCase = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) 
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) UpperCamelCase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : int = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
28
0
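# A minimal runnable sketch of the in_proj split that read_in_q_k_v above
# performs: PyTorch attention fuses query/key/value into one
# (3*hidden, hidden) matrix plus a (3*hidden,) bias, and the converter
# slices them back apart with hidden size 256. The q_proj/k_proj/v_proj key
# names here are illustrative stand-ins, not the model's actual attributes.
from collections import OrderedDict

import torch

hidden_size = 256
fused = OrderedDict(
    in_proj_weight=torch.randn(3 * hidden_size, hidden_size),
    in_proj_bias=torch.randn(3 * hidden_size),
)

in_proj_weight = fused.pop("in_proj_weight")
in_proj_bias = fused.pop("in_proj_bias")

split = OrderedDict()
# query, key and value are stacked along dim 0, in that order
split["q_proj.weight"] = in_proj_weight[:hidden_size, :]
split["k_proj.weight"] = in_proj_weight[hidden_size : 2 * hidden_size, :]
split["v_proj.weight"] = in_proj_weight[-hidden_size:, :]
split["q_proj.bias"] = in_proj_bias[:hidden_size]
split["k_proj.bias"] = in_proj_bias[hidden_size : 2 * hidden_size]
split["v_proj.bias"] = in_proj_bias[-hidden_size:]

assert split["v_proj.weight"].shape == (hidden_size, hidden_size)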
'''simple docstring''' from importlib import import_module from .logging import get_logger __lowercase : Tuple = get_logger(__name__) class __UpperCamelCase : def __init__( self , __a , __a=None ): '''simple docstring''' __a : List[Any] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , __a , getattr(__a , __a ) ) __a : Dict = module._original_module if isinstance(__a , _PatchedModuleObj ) else module class __UpperCamelCase : A_ = [] def __init__( self , __a , __a , __a , __a=None ): '''simple docstring''' __a : int = obj __a : Union[str, Any] = target __a : int = new __a : Dict = target.split('.' )[0] __a : Any = {} __a : Any = attrs or [] def __enter__( self ): '''simple docstring''' *__a , __a : Dict = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a ) ): try: __a : str = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a : Optional[int] = getattr(self.obj , __a ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a : List[str] = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) ) __a : List[str] = getattr(self.obj , __a ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) ) __a : str = getattr(__a , __a ) # finally set the target attribute setattr(__a , __a , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a : Any = getattr(import_module('.'.join(__a ) ) , __a ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a ) is attr_value: __a : List[Any] = getattr(self.obj , __a ) setattr(self.obj , __a , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a : str = globals()['__builtins__'][target_attr] setattr(self.obj , __a , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self , *__a ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , __a , self.original.pop(__a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def __UpperCAmelCase ( self ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
27
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowerCamelCase : Any = logging.get_logger(__name__) @add_end_docstrings(_a ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) requires_backends(self , 'decord' ) self.check_model_type(UpperCamelCase__ ) def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ): """simple docstring""" UpperCamelCase = {} if frame_sampling_rate is not None: UpperCamelCase = frame_sampling_rate if num_frames is not None: UpperCamelCase = num_frames UpperCamelCase = {} if top_k is not None: UpperCamelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ): """simple docstring""" return super().__call__(UpperCamelCase__ , **UpperCamelCase__ ) def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ): """simple docstring""" if num_frames is None: UpperCamelCase = self.model.config.num_frames if video.startswith('http://' ) or video.startswith('https://' ): UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content ) UpperCamelCase = VideoReader(UpperCamelCase__ ) videoreader.seek(0 ) UpperCamelCase = 0 UpperCamelCase = num_frames * frame_sampling_rate - 1 UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa ) UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy() UpperCamelCase = list(UpperCamelCase__ ) UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework ) return model_inputs def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.model(**UpperCamelCase__ ) return model_outputs def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCamelCase = self.model.config.num_labels if self.framework == "pt": UpperCamelCase = model_outputs.logits.softmax(-1 )[0] UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCamelCase = scores.tolist() UpperCamelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
28
0
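# A stdlib-only sketch of the core idea behind the patching helpers above:
# temporarily swap an attribute on an object (here a module) and restore it
# on exit. The context-manager shape and the name patch_attr are
# illustrative; the original additionally walks dotted submodule paths and
# handles renamed imports, which this sketch omits.
import os
from contextlib import contextmanager


@contextmanager
def patch_attr(obj, name, new):
    original = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        setattr(obj, name, original)


original_join = os.path.join
with patch_attr(os.path, "join", lambda *parts: "|".join(parts)):
    assert os.path.join("a", "b") == "a|b"
assert os.path.join is original_join  # restored on exit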
def is_palindrome(head):
    """Check for a palindrome by reversing the second half in place."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """Check for a palindrome by pushing the second half onto a stack."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    """Check for a palindrome by recording the positions of each value."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
29
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCamelCase : Optional[int] = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _lowerCamelCase : Union[str, Any] = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _lowerCamelCase : Optional[Any] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _lowerCamelCase : List[Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 
3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _lowerCamelCase : List[str] = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]: """simple docstring""" return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(A__ ) UpperCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __lowerCamelCase ( ) -> str: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __lowerCamelCase ( ) -> List[str]: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) ) UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' ) with open(A__ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ ) UpperCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
28
0
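# The three palindrome checks above call .val and .next on a linked-list
# node type that the record never defines; a minimal node class for trying
# them out, plus the simplest O(n)-extra-space check as a reference point.
# The class name ListNode is an assumption, not from the source.
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def is_palindrome_simple(head):
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]


assert is_palindrome_simple(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
assert not is_palindrome_simple(ListNode(1, ListNode(2)))
assert is_palindrome_simple(None)  # the empty list counts as a palindrome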
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Count how many words in words.txt have a letter-value sum
    (A=1, B=2, ...) that is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
30
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ): """simple docstring""" UpperCamelCase = sigma_max # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) UpperCamelCase = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.floataa , device=UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCamelCase = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device ) UpperCamelCase = sigma + gamma * sigma UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_hat + sigma_hat * model_output UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_prev + sigma_prev * model_output UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A 
( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ): """simple docstring""" raise NotImplementedError()
28
0
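# The stochastic step in the KarrasVe scheduler above inflates the noise
# level from sigma to sigma_hat = sigma + gamma * sigma and tops the sample
# up with fresh Gaussian noise of std sqrt(sigma_hat**2 - sigma**2). A
# quick numpy check (ignoring the s_noise factor) that this leaves the
# sample with marginal std sigma_hat, which is what makes the step valid:
import numpy as np

rng = np.random.default_rng(0)
sigma, gamma = 1.0, 0.5
sigma_hat = sigma + gamma * sigma

sample = rng.normal(scale=sigma, size=100_000)
eps = rng.normal(size=sample.shape)
sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps

assert abs(sample_hat.std() - sigma_hat) < 0.02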
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' @property def _A ( self : List[str] ): torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def _A ( self : int ): _UpperCAmelCase : Optional[Any] = self.dummy_uncond_unet _UpperCAmelCase : Any = ScoreSdeVeScheduler() _UpperCAmelCase : str = ScoreSdeVePipeline(unet=A , scheduler=A ) sde_ve.to(A ) sde_ve.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A ).images _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A , return_dict=A )[ 0 ] _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] _UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def _A ( self : int ): _UpperCAmelCase : int = "google/ncsnpp-church-256" _UpperCAmelCase : Any = UNetaDModel.from_pretrained(A ) _UpperCAmelCase : List[Any] = ScoreSdeVeScheduler.from_pretrained(A ) _UpperCAmelCase : Union[str, Any] = ScoreSdeVePipeline(unet=A , scheduler=A ) sde_ve.to(A ) sde_ve.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Tuple = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A ).images _UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _UpperCAmelCase : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
31
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
0
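# The diffusion-pipeline tests above assert on a 3x3 corner slice of the
# generated image instead of the full tensor; a sketch of that idiom with
# synthetic data (the zero reference values below are placeholders, where a
# real test hard-codes values captured from a trusted run):
import numpy as np

image = np.zeros((1, 32, 32, 3), dtype=np.float32)
image_slice = image[0, -3:, -3:, -1]            # bottom-right corner, last channel
expected_slice = np.zeros(9, dtype=np.float32)

assert image.shape == (1, 32, 32, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2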
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCAmelCase_ : Optional[int] = logging.get_logger('transformers.models.encodec') UpperCAmelCase_ : Any = { 'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg', } UpperCAmelCase_ : str = { 'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv', } UpperCAmelCase_ : Any = { 'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm', } UpperCAmelCase_ : Optional[int] = { 'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv', } UpperCAmelCase_ : List[str] = { 'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm', } UpperCAmelCase_ : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCAmelCase_ : Optional[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : List[Any] = [] def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Tuple , __A : List[Any] , __A : Union[str, Any] , __A : List[Any] ) -> Optional[Any]: """simple docstring""" for attribute in key.split('.' ): a_ : Optional[int] = getattr(__A , __A ) if weight_type is not None: a_ : Any = getattr(__A , __A ).shape else: a_ : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": a_ : Optional[int] = value elif weight_type == "weight_g": a_ : Dict = value elif weight_type == "weight_v": a_ : Optional[int] = value elif weight_type == "bias": a_ : Tuple = value elif weight_type == "running_mean": a_ : Optional[int] = value elif weight_type == "running_var": a_ : int = value elif weight_type == "num_batches_tracked": a_ : List[Any] = value elif weight_type == "weight_ih_l0": a_ : Optional[Any] = value elif weight_type == "weight_hh_l0": a_ : Optional[int] = value elif weight_type == "bias_ih_l0": a_ : Any = value elif weight_type == "bias_hh_l0": a_ : Any = value elif weight_type == "weight_ih_l1": a_ : Optional[int] = value elif weight_type == "weight_hh_l1": a_ : Any = value elif weight_type == "bias_ih_l1": a_ : List[str] = value elif weight_type == "bias_hh_l1": a_ : Optional[Any] = value else: a_ : Optional[Any] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Dict ) -> Tuple: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: a_ , a_ : Optional[int] = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : int , __A : Union[str, Any] ) -> int: """simple docstring""" a_ : str = [] if model_name == "encodec_24khz" or "encodec_32khz": a_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": a_ : int = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(__A , __A ): logger.info(F"""{name} was ignored""" ) continue a_ : Optional[int] = False for key, mapped_key in MAPPING.items(): if "*" in key: a_ , a_ : Any = key.split('.*.' ) if prefix in name and suffix in name: a_ : int = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue a_ : Optional[Any] = True if "*" in mapped_key: a_ : Union[str, Any] = name.split(__A )[0].split('.' 
)[-2] a_ : Optional[int] = mapped_key.replace('*' , __A ) if "weight_g" in name: a_ : int = 'weight_g' elif "weight_v" in name: a_ : Tuple = 'weight_v' elif "weight_ih_l0" in name: a_ : Optional[int] = 'weight_ih_l0' elif "weight_hh_l0" in name: a_ : Any = 'weight_hh_l0' elif "bias_ih_l0" in name: a_ : Tuple = 'bias_ih_l0' elif "bias_hh_l0" in name: a_ : List[str] = 'bias_hh_l0' elif "weight_ih_l1" in name: a_ : Optional[Any] = 'weight_ih_l1' elif "weight_hh_l1" in name: a_ : Any = 'weight_hh_l1' elif "bias_ih_l1" in name: a_ : Optional[Any] = 'bias_ih_l1' elif "bias_hh_l1" in name: a_ : Dict = 'bias_hh_l1' elif "bias" in name: a_ : int = 'bias' elif "weight" in name: a_ : Union[str, Any] = 'weight' elif "running_mean" in name: a_ : Union[str, Any] = 'running_mean' elif "running_var" in name: a_ : int = 'running_var' elif "num_batches_tracked" in name: a_ : str = 'num_batches_tracked' else: a_ : List[Any] = None set_recursively(__A , __A , __A , __A , __A ) continue if not is_used: unused_weights.append(__A ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Dict , __A : Tuple , __A : List[str]=None , __A : int=None , ) -> Tuple: """simple docstring""" if config_path is not None: a_ : List[str] = EncodecConfig.from_pretrained(__A ) else: a_ : List[Any] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": a_ : Optional[int] = [8, 5, 4, 4] a_ : Any = [2.2] a_ : List[str] = 64 a_ : Optional[Any] = 3_20_00 a_ : Union[str, Any] = 20_48 a_ : str = False a_ : Any = False a_ : List[str] = False elif model_name == "encodec_48khz": a_ : int = [8, 5, 4, 2] a_ : str = [3.0, 6.0, 12.0, 24.0] a_ : Any = 4_80_00 a_ : str = 2 a_ : Dict = False a_ : List[Any] = 'time_group_norm' a_ : List[str] = True a_ : str = 1.0 a_ : List[Any] = 0.01 else: raise ValueError(F"""Unknown model name: {model_name}""" ) a_ : Union[str, Any] = EncodecModel(__A ) a_ : Optional[int] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__A ) a_ : Union[str, Any] = torch.load(__A ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights a_ : Dict = original_checkpoint['best_state'] recursively_load_weights(__A , __A , __A ) model.save_pretrained(__A ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(__A ) model.push_to_hub(__A ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() parser.add_argument( '--model', default='encodec_24khz', type=str, help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase_ : int = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
32
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence below, up to max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
28
0
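# One thing worth flagging in recursively_load_weights above: the test
# `model_name == "encodec_24khz" or "encodec_32khz"` is always truthy in
# Python (the right-hand operand is a non-empty string literal), so as
# written the 24 kHz mapping is selected for every model name and the
# 48 kHz branch is unreachable; the intent is presumably
# `model_name in ("encodec_24khz", "encodec_32khz")`.
# Below, a self-contained sketch of the wildcard key convention the same
# function relies on: ".*." in a pattern matches a layer index, which is
# substituted back into the target name. The helper name map_key and the
# index extraction are illustrative; the original splits slightly
# differently.
def map_key(name: str, key: str, mapped_key: str):
    if "*" in key:
        prefix, suffix = key.split(".*.")
        if prefix in name and suffix in name:
            layer_index = name.split(prefix + ".")[1].split(".")[0]
            return mapped_key.replace("*", layer_index)
        return None
    return mapped_key if key in name else None


assert (
    map_key(
        "quantizer.vq.layers.3._codebook.embed",
        "quantizer.vq.layers.*._codebook.embed",
        "quantizer.layers.*.codebook.embed",
    )
    == "quantizer.layers.3.codebook.embed"
)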
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __A : int = get_tests_dir('''fixtures/test_sentencepiece.model''') __A : List[str] = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} __A : int = '''>>zh<<''' __A : Any = '''Helsinki-NLP/''' if is_torch_available(): __A : List[Any] = '''pt''' elif is_tf_available(): __A : Union[str, Any] = '''tf''' else: __A : Union[str, Any] = '''jax''' @require_sentencepiece class _UpperCAmelCase ( _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = MarianTokenizer SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Tuple = True def A ( self : Dict ) -> Union[str, Any]: super().setUp() lowercase_ : Optional[int] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase_ : Dict = dict(zip(A , range(len(A ) ) ) ) lowercase_ : Dict = Path(self.tmpdirname ) save_json(A , save_dir / VOCAB_FILES_NAMES['''vocab'''] ) save_json(A , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A , save_dir / VOCAB_FILES_NAMES['''source_spm'''] ) copyfile(A , save_dir / VOCAB_FILES_NAMES['''target_spm'''] ) lowercase_ : List[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : str , **A : List[Any] ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **A ) def A ( self : List[str] , A : int ) -> int: return ( "This is a test", "This is a test", ) def A ( self : int ) -> int: lowercase_ : Tuple = '''</s>''' lowercase_ : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def A ( self : List[str] ) -> str: lowercase_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(A ) , 9 ) def A ( self : str ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def A ( self : Any ) -> Optional[int]: lowercase_ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowercase_ : Dict = en_de_tokenizer(['''I am a small frog'''] , return_tensors=A ) self.assertIsInstance(A , A ) lowercase_ : Optional[int] = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(A , batch.input_ids[0] ) lowercase_ : Optional[int] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) lowercase_ : int = [x.name for x in Path(A ).glob('''*''' )] self.assertIn('''source.spm''' , A ) MarianTokenizer.from_pretrained(A ) def A ( self : List[Any] ) -> int: lowercase_ : int = self.get_tokenizer() lowercase_ : Optional[Any] = tok( ['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=A , truncation=A , return_tensors=A ) self.assertIsInstance(A , A ) self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def A ( self : Optional[int] ) -> str: lowercase_ : Tuple = self.get_tokenizer() lowercase_ : 
List[Any] = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=A , return_tensors=A ) self.assertIsInstance(A , A ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def A ( self : Optional[Any] ) -> Union[str, Any]: # fmt: off lowercase_ : Dict = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , ) def A ( self : Optional[int] ) -> List[str]: lowercase_ : Any = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' ) lowercase_ : Optional[Any] = '''Tämä on testi''' lowercase_ : Union[str, Any] = '''This is a test''' lowercase_ : str = [76, 7, 20_47, 2] lowercase_ : Union[str, Any] = [69, 12, 11, 9_40, 2] lowercase_ : int = tokenizer(A ).input_ids self.assertListEqual(A , A ) lowercase_ : int = tokenizer(text_target=A ).input_ids self.assertListEqual(A , A ) lowercase_ : Optional[Any] = tokenizer.decode(A , skip_special_tokens=A ) self.assertEqual(A , A )
33
'''simple docstring''' import math class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=0 ): # a graph with Node 0,1,...,N-1 """simple docstring""" UpperCamelCase = n UpperCamelCase = [ [math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ ) ] # adjacency matrix for weight UpperCamelCase = [ [math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ ) ] # dp[i][j] stores minimum distance from i to j def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = w def A ( self : str ): """simple docstring""" for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ): """simple docstring""" return self.dp[u][v] if __name__ == "__main__": _lowerCamelCase : List[str] = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
28
0
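# A compact, runnable equivalent of the floyd_warshall triple loop in the
# Graph class above, with plain names; the recurrence
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) is the same, only the
# adjacency matrix is passed in directly.
import math


def floyd_warshall(weights):
    dist = [row[:] for row in weights]
    n = len(dist)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist


INF = math.inf
adjacency = [
    [0, 9, INF],
    [INF, 0, 5],
    [2, INF, 0],
]
assert floyd_warshall(adjacency)[0][2] == 14  # shortest path is 0 -> 1 -> 2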
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : str ): UpperCAmelCase = DPTConfig() if "large" in checkpoint_url: UpperCAmelCase = 1_0_2_4 UpperCAmelCase = 4_0_9_6 UpperCAmelCase = 2_4 UpperCAmelCase = 1_6 UpperCAmelCase = [5, 1_1, 1_7, 2_3] UpperCAmelCase = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4] UpperCAmelCase = (1, 3_8_4, 3_8_4) if "ade" in checkpoint_url: UpperCAmelCase = True UpperCAmelCase = 1_5_0 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''ade20k-id2label.json''' UpperCAmelCase = json.load(open(cached_download(hf_hub_url(_a , _a , repo_type='''dataset''' ) ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} UpperCAmelCase = [1, 1_5_0, 4_8_0, 4_8_0] return config, expected_shape def snake_case_ (_a : List[str] ): UpperCAmelCase = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(_a , _a ) def snake_case_ (_a : Union[str, Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: UpperCAmelCase = name.replace('''patch_embed''' , '''patch_embeddings''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: UpperCAmelCase = name.replace('''proj''' , '''projection''' ) if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: UpperCAmelCase = name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: UpperCAmelCase = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: UpperCAmelCase = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: UpperCAmelCase = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: UpperCAmelCase = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: UpperCAmelCase = name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: UpperCAmelCase = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCAmelCase = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" ) if "out_conv" in name: UpperCAmelCase = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: UpperCAmelCase = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if 
"resConfUnit2" in name: UpperCAmelCase = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: UpperCAmelCase = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: UpperCAmelCase = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: UpperCAmelCase = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: UpperCAmelCase = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: UpperCAmelCase = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: UpperCAmelCase = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: UpperCAmelCase = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: UpperCAmelCase = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) return name def snake_case_ (_a : Optional[int] , _a : Optional[int] ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" ) UpperCAmelCase = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[: config.hidden_size, :] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a 
).raw ) return im @torch.no_grad() def snake_case_ (_a : Optional[int] , _a : Dict , _a : List[str] , _a : int ): UpperCAmelCase , UpperCAmelCase = get_dpt_config(_a ) # load original state_dict from URL UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(_a ) # rename keys for key in state_dict.copy().keys(): UpperCAmelCase = state_dict.pop(_a ) UpperCAmelCase = val # read in qkv matrices read_in_q_k_v(_a , _a ) # load HuggingFace model UpperCAmelCase = DPTForSemanticSegmentation(_a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_a ) model.load_state_dict(_a ) model.eval() # Check outputs on an image UpperCAmelCase = 4_8_0 if '''ade''' in checkpoint_url else 3_8_4 UpperCAmelCase = DPTImageProcessor(size=_a ) UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(_a , return_tensors='''pt''' ) # forward pass UpperCAmelCase = model(**_a ).logits if '''ade''' in checkpoint_url else model(**_a ).predicted_depth # Assert logits UpperCAmelCase = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: UpperCAmelCase = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(_a ) assert ( torch.allclose(outputs[0, 0, :3, :3] , _a , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _a ) ) Path(_a ).mkdir(exist_ok=_a ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_a ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_a ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(_a , _a ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_a , ) image_processor.push_to_hub( repo_path_or_name=Path(_a , _a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_a , ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) A =parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
34
'''simple docstring''' _lowerCamelCase : int = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
28
0
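The `read_in_q_k_v` helper above splits a fused timm-style qkv projection into separate query, key and value slices along the first dimension. A minimal sketch of the same slicing, assuming a hypothetical `hidden_size` of 4 and random tensors in place of a real checkpoint:

import torch

hidden_size = 4  # hypothetical toy size; real ViT backbones use 768 or 1024

# random stand-ins for a fused qkv projection of shape (3*h, h) and its bias
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

# query, key and value are stacked along dim 0, hidden_size rows each
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]

# the three slices tile the fused matrix exactly
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)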
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case( _lowerCAmelCase ) -> Tuple: snake_case__ , snake_case__ : Tuple = image.size snake_case__ , snake_case__ : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 snake_case__ : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) snake_case__ : Union[str, Any] = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0 snake_case__ : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) snake_case__ : Tuple = torch.from_numpy(_lowerCAmelCase ) return 2.0 * image - 1.0 class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : List[str] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Optional[Any] , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 100 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): snake_case__ : str = 1 elif isinstance(snake_case_ , torch.Tensor ): snake_case__ : List[Any] = image.shape[0] else: raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}" ) if isinstance(snake_case_ , PIL.Image.Image ): snake_case__ : Union[str, Any] = preprocess(snake_case_ ) snake_case__ , snake_case__ : Tuple = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image snake_case__ : str = (batch_size, self.unet.config.in_channels // 2, height, width) snake_case__ : Dict = next(self.unet.parameters() ).dtype snake_case__ : int = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ ) snake_case__ : Optional[Any] = image.to(device=self.device , dtype=snake_case_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case_ , device=self.device ) snake_case__ : Optional[Any] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler snake_case__ : Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] snake_case__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) snake_case__ : Optional[int] = {} if accepts_eta: snake_case__ : Optional[Any] = eta for t in self.progress_bar(snake_case_ ): # concat latents and low resolution image in the channel dimension. snake_case__ : int = torch.cat([latents, image] , dim=1 ) snake_case__ : Optional[int] = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) # predict the noise residual snake_case__ : str = self.unet(snake_case_ , snake_case_ ).sample # compute the previous noisy sample x_t -> x_t-1 snake_case__ : Tuple = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample # decode the image latents with the VQVAE snake_case__ : Optional[int] = self.vqvae.decode(snake_case_ ).sample snake_case__ : Optional[int] = torch.clamp(snake_case_ , -1.0 , 1.0 ) snake_case__ : Dict = image / 2 + 0.5 snake_case__ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case__ : Tuple = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
35
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase : List[Any] = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
0
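The super-resolution pipeline above only forwards `eta` when the scheduler's `step` signature declares it, since DDIM is the one scheduler that uses the parameter. The same introspection pattern in isolation, with two stand-in step functions rather than real schedulers:

import inspect

def ddim_step(sample, t, eta=0.0):  # stand-in for a scheduler step that accepts eta
    return sample

def pndm_step(sample, t):  # stand-in for one that does not
    return sample

def extra_step_kwargs(step_fn, eta):
    # forward eta only when the callee's signature declares it
    if "eta" in inspect.signature(step_fn).parameters:
        return {"eta": eta}
    return {}

print(extra_step_kwargs(ddim_step, 0.5))  # {'eta': 0.5}
print(extra_step_kwargs(pndm_step, 0.5))  # {}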
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' with open(_lowerCamelCase ) as metadata_file: _lowerCAmelCase : str = json.load(_lowerCamelCase ) _lowerCAmelCase : Tuple = LukeConfig(use_entity_aware_attention=_lowerCamelCase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path _lowerCAmelCase : Dict = torch.load(_lowerCamelCase , map_location="cpu" )["module"] # Load the entity vocab file _lowerCAmelCase : List[Any] = load_original_entity_vocab(_lowerCamelCase ) # add an entry for [MASK2] _lowerCAmelCase : Dict = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _lowerCAmelCase : Any = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _lowerCAmelCase : Union[str, Any] = AddedToken("<ent>" , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) _lowerCAmelCase : str = AddedToken("<ent2>" , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(_lowerCamelCase ) with open(os.path.join(_lowerCamelCase , "tokenizer_config.json" ) , "r" ) as f: _lowerCAmelCase : str = json.load(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = "MLukeTokenizer" with open(os.path.join(_lowerCamelCase , "tokenizer_config.json" ) , "w" ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) with open(os.path.join(_lowerCamelCase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : int = MLukeTokenizer.from_pretrained(_lowerCamelCase ) # Initialize the embeddings of the special tokens _lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(["@"] )[0] _lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(["#"] )[0] _lowerCAmelCase : Optional[Any] = state_dict["embeddings.word_embeddings.weight"] _lowerCAmelCase : str = word_emb[ent_init_index].unsqueeze(0 ) _lowerCAmelCase : Optional[int] = word_emb[enta_init_index].unsqueeze(0 ) _lowerCAmelCase : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _lowerCAmelCase : Optional[Any] = state_dict[bias_name] _lowerCAmelCase : Any = decoder_bias[ent_init_index].unsqueeze(0 ) _lowerCAmelCase : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 ) _lowerCAmelCase : str = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _lowerCAmelCase : Optional[Any] = F"encoder.layer.{layer_index}.attention.self." 
_lowerCAmelCase : Tuple = state_dict[prefix + matrix_name] _lowerCAmelCase : int = state_dict[prefix + matrix_name] _lowerCAmelCase : List[str] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _lowerCAmelCase : Dict = state_dict["entity_embeddings.entity_embeddings.weight"] _lowerCAmelCase : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _lowerCAmelCase : Dict = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _lowerCAmelCase : str = state_dict["entity_predictions.bias"] _lowerCAmelCase : str = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) _lowerCAmelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) _lowerCAmelCase : Optional[Any] = LukeForMaskedLM(config=_lowerCamelCase ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _lowerCAmelCase : Any = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _lowerCAmelCase : Optional[Any] = state_dict[key] else: _lowerCAmelCase : str = state_dict[key] _lowerCAmelCase , _lowerCAmelCase : Tuple = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) if set(_lowerCamelCase ) != {"luke.embeddings.position_ids"}: raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" ) if set(_lowerCamelCase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _lowerCAmelCase : str = MLukeTokenizer.from_pretrained(_lowerCamelCase , task="entity_classification" ) _lowerCAmelCase : str = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." 
_lowerCAmelCase : Dict = (0, 9) _lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , entity_spans=[span] , return_tensors="pt" ) _lowerCAmelCase : Dict = model(**_lowerCamelCase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _lowerCAmelCase : Tuple = torch.Size((1, 33, 768) ) _lowerCAmelCase : str = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _lowerCAmelCase : Optional[Any] = torch.Size((1, 1, 768) ) _lowerCAmelCase : int = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" F" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _lowerCAmelCase : Union[str, Any] = MLukeTokenizer.from_pretrained(_lowerCamelCase ) _lowerCAmelCase : List[str] = "Tokyo is the capital of <mask>." _lowerCAmelCase : Any = (24, 30) _lowerCAmelCase : str = tokenizer(_lowerCamelCase , entity_spans=[span] , return_tensors="pt" ) _lowerCAmelCase : Any = model(**_lowerCamelCase ) _lowerCAmelCase : List[Any] = encoding["input_ids"][0].tolist() _lowerCAmelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _lowerCAmelCase : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_lowerCamelCase ) _lowerCAmelCase : Tuple = outputs.entity_logits[0][0].argmax().item() _lowerCAmelCase : int = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(_lowerCamelCase ) ) model.save_pretrained(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : int = ["[MASK]", "[PAD]", "[UNK]"] _lowerCAmelCase : Dict = [json.loads(_lowerCamelCase ) for line in open(_lowerCamelCase )] _lowerCAmelCase : List[Any] = {} for entry in data: _lowerCAmelCase : int = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _lowerCAmelCase : Optional[Any] = entity_id break _lowerCAmelCase : int = F"{language}:{entity_name}" _lowerCAmelCase : Optional[int] = entity_id return new_mapping if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." 
) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) _snake_case = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
36
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict: """simple docstring""" UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T return jnp.matmul(A__ , norm_emb_a.T ) class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = jnp.floataa def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config ) UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype ) UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) UpperCamelCase = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) ) UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) ) def __call__( self : str , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.vision_model(UpperCamelCase__ )[1] UpperCamelCase = self.visual_projection(UpperCamelCase__ ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds ) UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCamelCase = 0.0 UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ ) # Use a lower threshold if an image has any special care concept UpperCamelCase = is_special_care * 0.0_1 UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCamelCase = jnp.round(UpperCamelCase__ , 3 ) UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = CLIPConfig _SCREAMING_SNAKE_CASE = """clip_input""" _SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ): """simple docstring""" if input_shape is None: UpperCamelCase = (1, 2_2_4, 2_2_4, 3) UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ ) super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init ) def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ): """simple docstring""" UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ ) UpperCamelCase = {'params': 
params_rng, 'dropout': dropout_rng} UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params'] return random_params def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ): """simple docstring""" UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
28
0
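The `jax_cosine_distance` helper above L2-normalizes both embedding matrices before a matmul, turning the product into a matrix of cosine similarities. An equivalent NumPy sketch, with random arrays standing in for CLIP embeddings:

import numpy as np

def cosine_similarity_matrix(emb_a, emb_b, eps=1e-12):
    # divide each row by its L2 norm, clipped away from zero
    a = emb_a / np.clip(np.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    b = emb_b / np.clip(np.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return a @ b.T  # (len_a, len_b) cosine similarities in [-1, 1]

emb_a = np.random.randn(2, 8)
emb_b = np.random.randn(3, 8)
print(cosine_similarity_matrix(emb_a, emb_b).shape)  # (2, 3)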
'''simple docstring''' import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[float] = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) __lowercase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} ) __lowercase : bool = field( default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) __lowercase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to use Adafactor.'''} ) __lowercase : Optional[float] = field( default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) __lowercase : Optional[float] = field( default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) __lowercase : Optional[float] = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) __lowercase : Optional[float] = field( default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) __lowercase : Optional[str] = field( default='''linear''' , metadata={'''help''': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
37
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor _lowerCamelCase : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ): """simple docstring""" warnings.warn( 'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ChineseCLIPImageProcessor instead.' , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
28
0
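The `ChineseCLIPFeatureExtractor` shim above follows the usual rename-with-deprecation pattern: the old class subclasses its replacement and only adds a `FutureWarning`. A generic sketch of the pattern, using made-up `OldProcessor`/`NewProcessor` names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):  # deprecated alias kept for backward compatibility
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)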
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Optional[Any] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) snake_case__ : Any = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) snake_case__ : Union[str, Any] = False snake_case__ : Dict = False def _A ( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=False ): UpperCamelCase :Dict = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): UpperCamelCase :List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str=13 , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : List[str]=None , ): UpperCamelCase :Optional[int] = parent UpperCamelCase :str = batch_size UpperCamelCase :Optional[int] = seq_length UpperCamelCase :Optional[Any] = is_training UpperCamelCase :List[str] = use_input_mask UpperCamelCase :Union[str, Any] = use_token_type_ids UpperCamelCase :Tuple = use_labels UpperCamelCase :Union[str, Any] = vocab_size UpperCamelCase :Optional[Any] = hidden_size UpperCamelCase :Any = num_hidden_layers UpperCamelCase :List[str] = num_attention_heads UpperCamelCase :List[str] = intermediate_size UpperCamelCase :int = hidden_act UpperCamelCase :Any = hidden_dropout_prob UpperCamelCase :int = attention_probs_dropout_prob UpperCamelCase :str = 
max_position_embeddings UpperCamelCase :str = type_vocab_size UpperCamelCase :Union[str, Any] = type_sequence_label_size UpperCamelCase :Tuple = initializer_range UpperCamelCase :List[Any] = num_labels UpperCamelCase :Union[str, Any] = num_choices UpperCamelCase :List[Any] = scope UpperCamelCase :Any = embedding_size def _A ( self : Dict ): UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :List[Any] = None if self.use_input_mask: UpperCamelCase :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :Tuple = None if self.use_token_type_ids: UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase :Optional[int] = None UpperCamelCase :Dict = None UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase :Tuple = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase :Union[str, Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _A ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ): UpperCamelCase :Dict = TFMobileBertModel(config=__lowerCamelCase ) UpperCamelCase :Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :str = model(__lowerCamelCase ) UpperCamelCase :Any = [input_ids, input_mask] UpperCamelCase :Dict = model(__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _A ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ): UpperCamelCase :Optional[Any] = TFMobileBertForMaskedLM(config=__lowerCamelCase ) UpperCamelCase :int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ): UpperCamelCase :str = TFMobileBertForNextSentencePrediction(config=__lowerCamelCase ) UpperCamelCase 
:Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _A ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ): UpperCamelCase :Optional[int] = TFMobileBertForPreTraining(config=__lowerCamelCase ) UpperCamelCase :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :Any = model(__lowerCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _A ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): UpperCamelCase :int = self.num_labels UpperCamelCase :Union[str, Any] = TFMobileBertForSequenceClassification(config=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :List[str] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): UpperCamelCase :str = self.num_choices UpperCamelCase :List[str] = TFMobileBertForMultipleChoice(config=__lowerCamelCase ) UpperCamelCase :Dict = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :Tuple = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :Optional[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCamelCase :List[str] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict ): UpperCamelCase :str = self.num_labels UpperCamelCase :int = TFMobileBertForTokenClassification(config=__lowerCamelCase ) UpperCamelCase :Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :Tuple = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ): UpperCamelCase :Union[str, Any] = TFMobileBertForQuestionAnswering(config=__lowerCamelCase ) UpperCamelCase :Tuple = {"""input_ids""": 
input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase :Tuple = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self : str ): UpperCamelCase :Tuple = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Dict = config_and_inputs UpperCamelCase :Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def _A ( self : Tuple ): UpperCamelCase :int = TFMobileBertModelTest.TFMobileBertModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def _A ( self : int ): self.config_tester.run_common_tests() def _A ( self : List[Any] ): UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase ) def _A ( self : Optional[Any] ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase ) def _A ( self : Union[str, Any] ): UpperCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase ) def _A ( self : str ): UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase ) def _A ( self : Dict ): UpperCamelCase :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase ) def _A ( self : Any ): UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase ) def _A ( self : Tuple ): UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase ) def _A ( self : Any ): UpperCamelCase :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase ) @slow def _A ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: UpperCamelCase :Optional[int] = TFMobileBertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _A ( self : Tuple ): UpperCamelCase :List[Any] = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) UpperCamelCase :str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase :Dict = model(__lowerCamelCase )[0] UpperCamelCase :str = [1, 6, 30_522] self.assertEqual(output.shape , __lowerCamelCase ) UpperCamelCase :int = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
38
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCamelCase : Optional[int] = logging.getLogger(__name__) def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int: """simple docstring""" def get_dataset(A__ ): UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = get_dataset(A__ ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int: """simple docstring""" UpperCamelCase = [] for epoch in range(A__ ): # Train quickly model.train() for batch in dataloader: UpperCamelCase , UpperCamelCase = batch UpperCamelCase = model(A__ ) UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ ) accelerator.backward(A__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): """simple docstring""" super().__init__() UpperCamelCase = nn.Parameter(torch.randn(1 ) ) UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A ( self : str , UpperCamelCase__ : Dict ): """simple docstring""" return x * self.a + self.b class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A ( self : Optional[int] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() # Train baseline UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' ) accelerator.save_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , 
(UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save everything UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' ) accelerator.save_state(UpperCamelCase__ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase__ ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ ) UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item() UpperCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch.tensor([1, 2, 3] ) UpperCamelCase = torch.tensor([2, 3, 4] ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(net.parameters() ) UpperCamelCase = Accelerator() with self.assertRaises(UpperCamelCase__ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def A ( self : Dict ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 ) UpperCamelCase , UpperCamelCase = dummy_dataloaders() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save initial accelerator.save_state() UpperCamelCase = scheduler.state_dict() train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(UpperCamelCase__ , scheduler.state_dict() ) def A ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCamelCase = DummyModel() UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 ) # Train baseline UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ ) UpperCamelCase = accelerator.prepare(UpperCamelCase__ ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def A ( self : Dict ): """simple docstring""" UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": 
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing" _lowerCamelCase : Union[str, Any] = DummyModel() _lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3) _lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders() _lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCamelCase : Any = group["params"][0].device break assert param_device.type == accelerator.device.type _lowerCamelCase : Tuple = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: _lowerCamelCase : Optional[Any] = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: _lowerCamelCase : Dict = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
28
0
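The checkpointing tests above reduce to one invariant: after a save/load round trip, model and optimizer state must match exactly. A minimal torch-only sketch of that round trip, using plain `state_dict` serialization instead of `Accelerator.save_state`:

import os
import tempfile

import torch

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "checkpoint.pt")
    torch.save({"model": model.state_dict(), "optimizer": optimizer.state_dict()}, path)

    weight_before = model.weight.item()
    with torch.no_grad():
        model.weight += 1.0  # perturb the weights, then restore from disk

    state = torch.load(path)
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
    assert model.weight.item() == weight_before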
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _lowerCamelCase : List[str] = 5_0000 _lowerCamelCase : Optional[int] = 5000 _lowerCamelCase ,_lowerCamelCase : int = os.path.split(__file__) _lowerCamelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> int: """simple docstring""" for i in range(0 , len(A__ ) , A__ ): UpperCamelCase = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(A__ ): UpperCamelCase = dataset[i] @get_duration def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> int: """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(0 , A__ , A__ ): UpperCamelCase = dataset[i : i + batch_size] def __lowerCamelCase ( ) -> List[str]: """simple docstring""" UpperCamelCase = {'num examples': SPEED_TEST_N_EXAMPLES} UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] UpperCamelCase = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) UpperCamelCase = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) UpperCamelCase = generate_example_dataset( os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(A__ ) ) UpperCamelCase = func(A__ , **A__ ) print('shuffling dataset' ) UpperCamelCase = dataset.shuffle() print('Second set of iterations (after shuffling)' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(A__ ) ) UpperCamelCase = func( A__ , **A__ ) with open(A__ , 'wb' ) as f: f.write(json.dumps(A__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
28
0
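`_LazyModule` in the CTRL init above defers heavy submodule imports until an attribute is first accessed. The same idea can be sketched with a module-level `__getattr__` (PEP 562); this is an illustration of the mechanism, not the actual `_LazyModule` implementation:

import importlib

# map public attribute -> standard-library module that provides it
_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):  # PEP 562: only runs for names not found in the module
    if name in _attr_to_module:
        return getattr(importlib.import_module(_attr_to_module[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")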
"""simple docstring""" import requests from bsa import BeautifulSoup def lowercase ( A_ , A_ )-> str: '''simple docstring''' a : List[Any] = BeautifulSoup(requests.get(A_ , params=A_ ).content , "html.parser" ) a : Tuple = soup.find("div" , attrs={"class": "gs_ri"} ) a : Any = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" ) return anchors[2].get_text() if __name__ == "__main__": __lowercase = { """title""": ( """Precisely geometry controlled microsupercapacitors for ultrahigh areal """ """capacitance, volumetric capacitance, and energy density""" ), """journal""": """Chem. Mater.""", """volume""": 30, """pages""": """3979-3990""", """year""": 2018, """hl""": """en""", } print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
40
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ): """simple docstring""" if rouge_types is None: UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: UpperCamelCase = scoring.BootstrapAggregator() else: UpperCamelCase = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: UpperCamelCase = aggregator.aggregate() else: UpperCamelCase = {} for key in scores[0]: UpperCamelCase = [score[key] for score in scores] return result
28
0
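A minimal sketch of how the wrapped `rouge_score` package behaves on its own, assuming it is installed; each score is a `Score(precision, recall, fmeasure)` named tuple, which is what the metric above aggregates.

from rouge_score import rouge_scorer

# Score one (reference, prediction) pair; RougeScorer.score takes the
# reference (target) first, then the prediction.
scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
scores = scorer.score("the cat sat on the mat", "the cat was on the mat")
print(round(scores["rouge1"].fmeasure, 3))  # unigram-overlap F1
print(round(scores["rougeL"].fmeasure, 3))  # longest-common-subsequence F1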
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A : Tuple =logging.get_logger(__name__) _A : Any ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Optional[int] ={ '''vocab_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''', }, '''merges_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''', }, } _A : Dict ={ '''gpt2''': 1_024, '''gpt2-medium''': 1_024, '''gpt2-large''': 1_024, '''gpt2-xl''': 1_024, '''distilgpt2''': 1_024, } class _lowercase ( _lowercase ): a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["""input_ids""", """attention_mask"""] a = GPTaTokenizer def __init__( self: Any , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Optional[Any]="<|endoftext|>" , UpperCamelCase__: Tuple="<|endoftext|>" , UpperCamelCase__: int="<|endoftext|>" , UpperCamelCase__: str=False , **UpperCamelCase__: Any , ): super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : Union[str, Any] = kwargs.pop("""add_bos_token""" , UpperCamelCase__ ) lowerCamelCase__ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : Optional[Any] = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) ) lowerCamelCase__ : Optional[int] = add_prefix_space lowerCamelCase__ : Dict = pre_tok_class(**UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = add_prefix_space def lowerCamelCase_ ( self: str , *UpperCamelCase__: str , **UpperCamelCase__: Tuple ): lowerCamelCase__ : List[str] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to 
instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict , *UpperCamelCase__: List[str] , **UpperCamelCase__: Any ): lowerCamelCase__ : Optional[int] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : Tuple = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def lowerCamelCase_ ( self: str , UpperCamelCase__: "Conversation" ): lowerCamelCase__ : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] ) if len(UpperCamelCase__ ) > self.model_max_length: lowerCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :] return input_ids
41
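A hedged usage sketch for the fast GPT-2 tokenizer above, assuming `transformers` is installed; it shows why the assertion on `add_prefix_space` exists when encoding pre-tokenized input.

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# With add_prefix_space=True, words after the first are encoded as if
# preceded by a space, which is what is_split_into_words expects.
enc = tok(["hello", "world"], is_split_into_words=True)
print(enc.input_ids)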
'''simple docstring'''

from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """simple docstring"""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
28
0
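Since `128 + level + (c - 128)` reduces to `c + level`, the brightness transform above is just a per-channel shift. A minimal variant with explicit clamping (`clamped_brightness` is a hypothetical name, not part of the original), rather than relying on how `Image.point` handles out-of-range table values:

from PIL import Image


def clamped_brightness(img: Image.Image, level: float) -> Image.Image:
    # Shift every channel value by `level`, keeping results in the 0-255 range.
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: max(0, min(255, int(c + level))))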
'''simple docstring'''

import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        """simple docstring"""
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
42
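The trainer shim above follows a common deprecation pattern: subclass the replacement, emit a `FutureWarning`, and forward everything. A standalone sketch with hypothetical class names:

import warnings


class NewTrainer:
    """Stand-in for the replacement class (hypothetical)."""

    def __init__(self, args=None, **kwargs):
        self.args = args


class LegacyTrainer(NewTrainer):
    """Deprecated alias: warn, then delegate everything to the replacement."""

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`LegacyTrainer` is deprecated; use `NewTrainer` instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)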
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
28
0
from typing import Any


class Node:
    '''simple docstring'''

    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    '''simple docstring'''

    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=''' ''')
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('''After swapping''')
    ll.print_list()
43
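The `swap_nodes` method above walks the list twice, once per target value. A sketch of the same data-swap done in a single traversal (hypothetical names, standalone `Node`):

from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


def swap_data_single_pass(head, a: Any, b: Any) -> None:
    """Swap the payloads of the first nodes holding `a` and `b`, one traversal."""
    if a == b:
        return
    node_a = node_b = None
    cur = head
    while cur is not None and (node_a is None or node_b is None):
        if node_a is None and cur.data == a:
            node_a = cur
        elif node_b is None and cur.data == b:
            node_b = cur
        cur = cur.next
    if node_a is not None and node_b is not None:
        node_a.data, node_b.data = node_b.data, node_a.data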
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : int ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[int] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def A 
( self : Any ): """simple docstring""" UpperCamelCase = self.get_config() UpperCamelCase = 3_0_0 return config def A ( self : Tuple ): """simple docstring""" ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = MraModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ): """simple docstring""" UpperCamelCase = True UpperCamelCase = MraModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = () def A ( self : str ): """simple docstring""" UpperCamelCase = MraModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : str ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : str ): 
"""simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def A ( self : List[Any] ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : List[str] ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = 5_0_2_6_5 UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
28
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : List[Any] = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[str] = "ibert" def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=False , a__="none" , **a__ , ): super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ ) _lowerCAmelCase : List[str] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Union[str, Any] = num_attention_heads _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : List[Any] = intermediate_size _lowerCAmelCase : List[str] = hidden_dropout_prob _lowerCAmelCase : Dict = attention_probs_dropout_prob _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Dict = type_vocab_size _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : Union[str, Any] = layer_norm_eps _lowerCAmelCase : Optional[int] = position_embedding_type _lowerCAmelCase : Any = quant_mode _lowerCAmelCase : Union[str, Any] = force_dequant class __A ( SCREAMING_SNAKE_CASE_ ): @property def __A ( self ): if self.task == "multiple-choice": _lowerCAmelCase : Any = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
44
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowerCamelCase : Union[str, Any] = "\\n\n" _lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" _lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Tuple ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int = 1_6 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[Any]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCamelCase = 'cuda' else: UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCamelCase = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) UpperCamelCase = model.to(UpperCamelCase__ ) UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCamelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCamelCase = model.config.max_length - 1 else: UpperCamelCase = model.config.max_length UpperCamelCase = tokenizer( UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='pt' , return_attention_mask=UpperCamelCase__ , ).to(UpperCamelCase__ ) UpperCamelCase = encodings['input_ids'] UpperCamelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCamelCase = [] UpperCamelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ) ): UpperCamelCase = min(start_index + batch_size , len(UpperCamelCase__ ) ) UpperCamelCase = encoded_texts[start_index:end_index] UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase__ ) UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCamelCase__ ), attn_mask] , dim=1 ) UpperCamelCase = encoded_batch with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).logits UpperCamelCase = out_logits[..., :-1, :].contiguous() UpperCamelCase = labels[..., 1:].contiguous() UpperCamelCase = attn_mask[..., 1:].contiguous() UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase__ )}
28
0
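The perplexity computation above reduces, per sequence, to exponentiating the average per-token cross-entropy over non-masked positions. A toy sketch with random logits standing in for a language model, so no checkpoint is needed:

import torch
from torch.nn import CrossEntropyLoss

torch.manual_seed(0)
logits = torch.randn(1, 5, 10)          # (batch, seq_len, vocab_size)
labels = torch.randint(0, 10, (1, 5))   # (batch, seq_len)
loss_fct = CrossEntropyLoss(reduction="none")
# CrossEntropyLoss expects (batch, vocab, seq) for sequence targets, hence the transpose.
per_token_nll = loss_fct(logits.transpose(1, 2), labels)  # shape (1, 5)
perplexity = torch.exp(per_token_nll.mean(dim=1))
print(perplexity)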
"""simple docstring""" lowercase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] lowercase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] lowercase_ = { 0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday", } def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> str: assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __a = year // 100 __a = (5 * (century % 4) + 2) % 7 __a = year % 100 __a = centurian % 12 __a = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __a = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) else DOOMSDAY_LEAP[month - 1] ) __a = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
45
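A quick cross-check of the doomsday result against the standard library; `datetime.date.strftime("%A")` gives the weekday name for any date the function above accepts.

import datetime

# Independent reference: 2024 is a leap year whose doomsday is a Thursday,
# and Feb 29 falls on the February doomsday, so it must be a Thursday.
assert datetime.date(2024, 2, 29).strftime("%A") == "Thursday"
print(datetime.date(2024, 2, 29).strftime("%A"))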
'''simple docstring'''


def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
28
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = "▁" SCREAMING_SNAKE_CASE__ = {"vocab_file": "sentencepiece.bpe.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } SCREAMING_SNAKE_CASE__ = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase ) ) lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase = 1 lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowercase ) -> int: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) if token_ids_a is None: return [1] + ([0] * len(lowercase )) + [1] return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1] def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _snake_case ( self ) -> str: lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase ) -> List[str]: return self.sp_model.encode(lowercase , out_type=lowercase ) def _snake_case ( self , lowercase ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase = self.sp_model.PieceToId(lowercase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , lowercase ) -> List[str]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , lowercase ) -> List[Any]: lowerCAmelCase = """""".join(lowercase ).replace(lowercase , """ """ ).strip() return out_string def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowercase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not os.path.isfile(self.vocab_file ): with open(lowercase , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) 
return (out_vocab_file,)
46
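The fairseq/SentencePiece alignment above boils down to: ids 0 to 3 are reserved for `<s>`, `<pad>`, `</s>`, `<unk>`, and every real SentencePiece id is shifted by one. A standalone restatement of that id mapping (hypothetical names):

FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # fairseq's first real token sits at 4; spm's sits at 3


def spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is <unk>, which fairseq maps to 3; all other ids shift by the offset.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]


print(spm_id_to_fairseq_id(0))  # 3 (<unk>)
print(spm_id_to_fairseq_id(3))  # 4 (first real token, ',')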
'''simple docstring'''


def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
28
0
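A property check for the sort above using only the standard library: repeated `bisect.insort` inserts each element at its binary-searched slot, so it must reproduce the same ordering.

import random
from bisect import insort

data = [random.randint(0, 100) for _ in range(50)]
result = []
for x in data:
    insort(result, x)  # binary search for the slot, then insert
assert result == sorted(data)
print(result[:10])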
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
47
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase = [] for i in range(A__ ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ ) UpperCamelCase = 1.0 - self.betas UpperCamelCase = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase = 1.0 # setable values UpperCamelCase = None UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() ) UpperCamelCase = variance_type def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ): """simple docstring""" if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase = 
self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) ) UpperCamelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase = variance.log() UpperCamelCase = beta.log() UpperCamelCase = (predicted_variance + 1) / 2 UpperCamelCase = frac * max_log + (1 - frac) * min_log return variance def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase = t - 1 UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase = self.betas[t] UpperCamelCase = self.alphas[t] else: UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase = torch.clamp( UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device ) UpperCamelCase = self._get_variance( UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , ) if self.variance_type == "fixed_small_log": UpperCamelCase = variance elif self.variance_type == "learned_range": UpperCamelCase = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) UpperCamelCase = variance * variance_noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ): """simple docstring""" UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase = timesteps.to(original_samples.device ) UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
28
0
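A standalone restatement of the `betas_for_alpha_bar` helper in the scheduler above for the cosine ("squaredcos_cap_v2") case, under the hypothetical name `cosine_betas`: each beta is one minus the ratio of consecutive cumulative alphas, capped at `max_beta`.

import math

import torch


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    """Squared-cosine beta schedule, mirroring betas_for_alpha_bar above."""

    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


print(cosine_betas(10))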
import numpy as np


def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    # Exponential linear unit: identity for positive inputs,
    # alpha * (exp(x) - 1) for non-positive ones.
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
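A quick numeric check of the ELU formula above with alpha = 0.3; negative inputs saturate toward -alpha while non-negative inputs pass through unchanged.

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(np.where(x > 0, x, 0.3 * (np.exp(x) - 1)))
# -> [-0.2594..., -0.1180..., 0., 0.5, 2.]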
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = num_stages UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = scope def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : List[str] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Optional[int] ): """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def A ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def A ( self : Optional[int] ): """simple docstring""" pass def A ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = 
[*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def A ( self : Dict ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Any: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : Optional[Any] ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ConvNextConfig _SCREAMING_SNAKE_CASE = False def A 
( self : Tuple ): """simple docstring""" UpperCamelCase = ConvNextModelTester(self )
28
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case :int = logging.get_logger(__name__) __snake_case :Optional[Any] = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[str] = '''mobilenet_v1''' def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : Any=1.0 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : Dict="relu6" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=0.9_99 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , **__SCREAMING_SNAKE_CASE : str , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') __a = num_channels __a = image_size __a = depth_multiplier __a = min_depth __a = hidden_act __a = tf_padding __a = classifier_dropout_prob __a = initializer_range __a = layer_norm_eps class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[str] = version.parse('''1.11''' ) @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def _lowerCamelCase ( self : Dict): '''simple docstring''' return 1E-4
49
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def __lowerCamelCase ( A__ ) -> Dict: """simple docstring""" UpperCamelCase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of 
cross-attention to the state dict UpperCamelCase = in_proj_weight_cross_attn[:256, :] UpperCamelCase = in_proj_bias_cross_attn[:256] UpperCamelCase = in_proj_weight_cross_attn[256:512, :] UpperCamelCase = in_proj_bias_cross_attn[256:512] UpperCamelCase = in_proj_weight_cross_attn[-256:, :] UpperCamelCase = in_proj_bias_cross_attn[-256:] def __lowerCamelCase ( A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase = max(A__ , A__ ) UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000 UpperCamelCase = target_max_size / current_max_size UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __lowerCamelCase ( A__ ) -> List[Any]: """simple docstring""" UpperCamelCase = F.to_tensor(A__ ) UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" logger.info('Converting model...' ) # load original state dict UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) UpperCamelCase = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): UpperCamelCase = state_dict.pop(A__ ) UpperCamelCase = val # create HuggingFace model and load state dict UpperCamelCase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCamelCase = 15 UpperCamelCase = 2 UpperCamelCase = {0: 'table', 1: 'table rotated'} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} else: UpperCamelCase = 125 UpperCamelCase = 6 UpperCamelCase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} UpperCamelCase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 ) UpperCamelCase = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ ) UpperCamelCase = Image.open(A__ ).convert('RGB' ) UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) UpperCamelCase = model(A__ ) if "detection" in checkpoint_url: UpperCamelCase = (1, 15, 3) UpperCamelCase = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCamelCase = (1, 125, 7) UpperCamelCase = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) 
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) UpperCamelCase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : int = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
28
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowerCAmelCase : UpperCAmelCase__ = MBartConfig UpperCAmelCase__ = {} UpperCAmelCase__ = """gelu""" def __init__( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=13 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=False , UpperCAmelCase : Dict=99 , UpperCAmelCase : int=32 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=4 , UpperCAmelCase : Any=37 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=20 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Dict=0 , ) -> Dict: lowerCamelCase__ : Union[str, Any] = parent lowerCamelCase__ : List[str] = batch_size lowerCamelCase__ : str = seq_length lowerCamelCase__ : str = is_training lowerCamelCase__ : Dict = use_labels lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : int = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Dict = num_attention_heads lowerCamelCase__ : Optional[Any] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Dict = max_position_embeddings lowerCamelCase__ : Dict = eos_token_id lowerCamelCase__ : Any = pad_token_id lowerCamelCase__ : int = bos_token_id def A_ ( self : Tuple ) -> int: lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase__ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase__ : int = prepare_mbart_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def A_ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] ) -> Union[str, Any]: lowerCamelCase__ : Tuple = TFMBartModel(config=UpperCAmelCase ).get_decoder() lowerCamelCase__ : int = inputs_dict['input_ids'] lowerCamelCase__ : Union[str, Any] = input_ids[:1, :] lowerCamelCase__ : str = inputs_dict['attention_mask'][:1, :] lowerCamelCase__ : Union[str, Any] = 
inputs_dict['head_mask'] lowerCamelCase__ : Union[str, Any] = 1 # first forward pass lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase ) lowerCamelCase__ , lowerCamelCase__ : Any = outputs.to_tuple() lowerCamelCase__ : Optional[int] = past_key_values[1] def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]: if attention_mask is None: lowerCamelCase__ : Union[str, Any] = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = False def A_ ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : str ) -> Tuple: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def A_ ( self : Dict ) -> int: lowerCamelCase__ : Dict = TFMBartModelTester(self ) lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCAmelCase ) def A_ ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def A_ ( self : Tuple ) -> int: lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class lowerCAmelCase ( unittest.TestCase ): UpperCAmelCase__ = [ """ UN Chief Says There Is No Military Solution in Syria""", ] UpperCAmelCase__ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] UpperCAmelCase__ = """facebook/mbart-large-en-ro""" @cached_property def A_ ( self : Union[str, Any] ) -> Optional[Any]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self : Optional[Any] ) -> Any: lowerCamelCase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def A_ ( self : List[Any] , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]: lowerCamelCase__ : List[str] = self.translate_src_text(**UpperCAmelCase ) self.assertListEqual(self.expected_text , UpperCAmelCase ) def A_ ( self : str , **UpperCAmelCase : int ) -> List[str]: lowerCamelCase__ : Union[str, Any] = self.tokenizer(self.src_text , **UpperCAmelCase , return_tensors='tf' ) lowerCamelCase__ : Optional[int] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) lowerCamelCase__ : str = self.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) return generated_words @slow def A_ ( self : str ) -> Union[str, Any]: self._assert_generated_batch_equal_expected()
50
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowerCamelCase : Any = logging.get_logger(__name__) @add_end_docstrings(_a ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) requires_backends(self , 'decord' ) self.check_model_type(UpperCamelCase__ ) def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ): """simple docstring""" UpperCamelCase = {} if frame_sampling_rate is not None: UpperCamelCase = frame_sampling_rate if num_frames is not None: UpperCamelCase = num_frames UpperCamelCase = {} if top_k is not None: UpperCamelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ): """simple docstring""" return super().__call__(UpperCamelCase__ , **UpperCamelCase__ ) def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ): """simple docstring""" if num_frames is None: UpperCamelCase = self.model.config.num_frames if video.startswith('http://' ) or video.startswith('https://' ): UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content ) UpperCamelCase = VideoReader(UpperCamelCase__ ) videoreader.seek(0 ) UpperCamelCase = 0 UpperCamelCase = num_frames * frame_sampling_rate - 1 UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa ) UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy() UpperCamelCase = list(UpperCamelCase__ ) UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework ) return model_inputs def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = self.model(**UpperCamelCase__ ) return model_outputs def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCamelCase = self.model.config.num_labels if self.framework == "pt": UpperCamelCase = model_outputs.logits.softmax(-1 )[0] UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCamelCase = scores.tolist() UpperCamelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
28
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case_ : Optional[int] = logging.get_logger(__name__) snake_case_ : List[Any] = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class __snake_case ( a , a ): UpperCAmelCase__ : Optional[int] = '''focalnet''' def __init__( self : List[Any] , _snake_case : Any=224 , _snake_case : Optional[Any]=4 , _snake_case : Dict=3 , _snake_case : Tuple=96 , _snake_case : str=False , _snake_case : int=[192, 384, 768, 768] , _snake_case : Optional[Any]=[2, 2, 6, 2] , _snake_case : Optional[Any]=[2, 2, 2, 2] , _snake_case : Tuple=[3, 3, 3, 3] , _snake_case : str="gelu" , _snake_case : List[Any]=4.0 , _snake_case : str=0.0 , _snake_case : Optional[int]=0.1 , _snake_case : Dict=False , _snake_case : List[str]=1e-4 , _snake_case : Dict=False , _snake_case : Any=False , _snake_case : str=False , _snake_case : Union[str, Any]=0.0_2 , _snake_case : Tuple=1e-5 , _snake_case : Optional[Any]=32 , _snake_case : Tuple=None , _snake_case : List[str]=None , **_snake_case : Dict , ): """simple docstring""" super().__init__(**_snake_case) UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embed_dim UpperCAmelCase_ = use_conv_embed UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = focal_levels UpperCAmelCase_ = focal_windows UpperCAmelCase_ = hidden_act UpperCAmelCase_ = mlp_ratio UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = drop_path_rate UpperCAmelCase_ = use_layerscale UpperCAmelCase_ = layerscale_value UpperCAmelCase_ = use_post_layernorm UpperCAmelCase_ = use_post_layernorm_in_modulation UpperCAmelCase_ = normalize_modulator UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = encoder_stride UpperCAmelCase_ = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)] UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices( out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names)
51
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCamelCase : Optional[int] = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _lowerCamelCase : Union[str, Any] = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _lowerCamelCase : Dict = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _lowerCamelCase : Optional[Any] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _lowerCamelCase : List[Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 
3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _lowerCamelCase : List[str] = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]: """simple docstring""" return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Any: """simple docstring""" assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> str: """simple docstring""" UpperCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> Dict: """simple docstring""" assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , A__ ) def __lowerCamelCase ( A__ , A__ ) -> str: """simple docstring""" assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , A__ ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]: """simple docstring""" assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def __lowerCamelCase ( ) -> str: """simple docstring""" UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(A__ ) UpperCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def __lowerCamelCase ( ) -> str: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def __lowerCamelCase ( ) -> List[str]: """simple docstring""" # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) ) UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' ) with open(A__ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ ) UpperCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
28
0
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__ ( unittest.TestCase ): @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.dummy_uncond_unet UpperCamelCase : str = KarrasVeScheduler() UpperCamelCase : List[str] = KarrasVePipeline(unet=A_ , scheduler=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Tuple = torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = pipe(num_inference_steps=2 , generator=A_ , output_type="numpy" ).images UpperCamelCase : List[Any] = torch.manual_seed(0 ) UpperCamelCase : Any = pipe(num_inference_steps=2 , generator=A_ , output_type="numpy" , return_dict=A_ )[0] UpperCamelCase : str = image[0, -3:, -3:, -1] UpperCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = "google/ncsnpp-celebahq-256" UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(A_ ) UpperCamelCase : Any = KarrasVeScheduler() UpperCamelCase : List[Any] = KarrasVePipeline(unet=A_ , scheduler=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[str] = torch.manual_seed(0 ) UpperCamelCase : List[str] = pipe(num_inference_steps=20 , generator=A_ , output_type="numpy" ).images UpperCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCamelCase : int = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
52
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE ( _a , _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ): """simple docstring""" UpperCamelCase = sigma_max # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ): """simple docstring""" UpperCamelCase = num_inference_steps UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) UpperCamelCase = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.floataa , device=UpperCamelCase__ ) def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCamelCase = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device ) UpperCamelCase = sigma + gamma * sigma UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_hat + sigma_hat * model_output UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ): """simple docstring""" UpperCamelCase = sample_prev + sigma_prev * model_output UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def A 
( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ): """simple docstring""" raise NotImplementedError()
28
0
def text_justification(word: str, max_width: int) -> list:
    """Split `word` into fully justified lines of width `max_width`."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
53
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
0
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True}) snake_case__ : ClassVar[Features] = Features({"text": Value("string")}) snake_case__ : ClassVar[Features] = Features({}) snake_case__ : str = "text" @property def UpperCAmelCase_ ( self : str ) -> Dict[str, str]: return {self.text_column: "text"}
54
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters produced by the recurrence while they do not
    exceed `max_perimeter`."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
28
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
55
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
28
0
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class a ( _lowerCamelCase ): def __init__( self : str , lowercase_ : Union[str, "sqlalchemy.sql.Selectable"] , lowercase_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , **lowercase_ : Optional[int] , ): super().__init__(features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , **lowercase_ ) snake_case_ = Sql( cache_dir=lowercase_ , features=lowercase_ , sql=lowercase_ , con=lowercase_ , **lowercase_ , ) def A_ ( self : List[Any] ): snake_case_ = None snake_case_ = None snake_case_ = None snake_case_ = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , ) # Build dataset for splits snake_case_ = self.builder.as_dataset( split='''train''' , verification_mode=lowercase_ , in_memory=self.keep_in_memory ) return dataset class a : def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : str , lowercase_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"num_proc {num_proc} must be an integer > 0." ) snake_case_ = dataset snake_case_ = name snake_case_ = con snake_case_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE snake_case_ = num_proc snake_case_ = to_sql_kwargs def A_ ( self : str ): snake_case_ = self.to_sql_kwargs.pop('''sql''' , lowercase_ ) snake_case_ = self.to_sql_kwargs.pop('''con''' , lowercase_ ) snake_case_ = self.to_sql_kwargs.pop('''index''' , lowercase_ ) snake_case_ = self._write(index=lowercase_ , **self.to_sql_kwargs ) return written def A_ ( self : List[str] , lowercase_ : Any ): snake_case_ ,snake_case_ ,snake_case_ = args snake_case_ = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs snake_case_ = query_table( table=self.dataset.data , key=slice(lowercase_ , offset + self.batch_size ) , indices=self.dataset._indices , ) snake_case_ = batch.to_pandas() snake_case_ = df.to_sql(self.name , self.con , index=lowercase_ , **lowercase_ ) return num_rows or len(lowercase_ ) def A_ ( self : Optional[int] , lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ): snake_case_ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: snake_case_ ,snake_case_ = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowercase_ , lowercase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow 
format''' , ): written += num_rows return written
56
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
28
0
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A : List[Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *__a , **__a ): super().__init__(*__a , **__a ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def snake_case ( self , __a=None ): __lowerCAmelCase = {} if top_k is not None: __lowerCAmelCase = top_k return {}, {}, postprocess_params def __call__( self , __a , **__a ): return super().__call__(__a , **__a ) def snake_case ( self , __a ): __lowerCAmelCase = load_image(__a ) __lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework ) return model_inputs def snake_case ( self , __a ): __lowerCAmelCase = self.model(**__a ) return model_outputs def snake_case ( self , __a , __a=5 ): if top_k > self.model.config.num_labels: __lowerCAmelCase = self.model.config.num_labels if self.framework == "pt": __lowerCAmelCase = model_outputs.logits.softmax(-1 )[0] __lowerCAmelCase , __lowerCAmelCase = probs.topk(__a ) elif self.framework == "tf": __lowerCAmelCase = stable_softmax(model_outputs.logits , axis=-1 )[0] __lowerCAmelCase = tf.math.top_k(__a , k=__a ) __lowerCAmelCase , __lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"Unsupported framework: {self.framework}" ) __lowerCAmelCase = scores.tolist() __lowerCAmelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
57
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
0
"""Processor class for SAM."""
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point set to the largest number of points in the batch, using the pad value."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """Rescale coordinates from the original image size to the resized (longest-edge) size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
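# --- Usage sketch (not part of the original file) ----------------------------
# A minimal, hedged example of the processor above: one image and one prompt
# point. The checkpoint name and input values are illustrative assumptions;
# `_normalize_coordinates` rescales the point to the `longest_edge` size, and
# a batch dimension is inserted because len(input_points.shape) != 4.
from PIL import Image

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # assumed checkpoint
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs = processor(
    images=image,
    input_points=[[[320, 240]]],  # one (x, y) point for one image
    input_labels=[[1]],           # 1 = foreground point
    return_tensors="pt",
)
print(inputs["pixel_values"].shape, inputs["input_points"].shape)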
"""Flax safety checker for Stable Diffusion, built on a CLIP vision backbone."""
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-normalize both embedding matrices, then return their cosine similarity matrix."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)
        )
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
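# --- Quick check (not part of the original file) ------------------------------
# A small sanity sketch for `jax_cosine_distance` above: both inputs are
# L2-normalized row-wise, so comparing a matrix against itself yields ~1.0 on
# the diagonal. The test vectors are arbitrary illustrative values.
emb = jnp.array([[1.0, 0.0], [3.0, 4.0]])
sims = jax_cosine_distance(emb, emb)
assert jnp.allclose(jnp.diag(sims), 1.0, atol=1e-6)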
def bead_sort(sequence: list) -> list:
    """
    Bead sort (gravity sort). Only works for sequences of non-negative integers.
    https://en.wikipedia.org/wiki/Bead_sort
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the excess beads "fall" from the upper rod to the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
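# --- Property check (not part of the original file) ---------------------------
# A quick randomized sketch: bead_sort should agree with sorted() on any list
# of non-negative integers. A copy is passed because bead_sort mutates in place.
import random

data = [random.randrange(100) for _ in range(20)]
assert bead_sort(list(data)) == sorted(data)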
"""Feature extractor class for Chinese-CLIP (deprecated alias of the image processor)."""
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor

logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = ['''input_features''', '''is_longer'''] def __init__( self : int , UpperCamelCase_ : Any=6_4 , UpperCamelCase_ : Dict=4_8_0_0_0 , UpperCamelCase_ : Dict=4_8_0 , UpperCamelCase_ : Optional[int]=1_0 , UpperCamelCase_ : str=1_0_2_4 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4_0_0_0 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : List[str] , ): super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : str = top_db lowerCAmelCase : str = truncation lowerCAmelCase : Optional[Any] = padding lowerCAmelCase : List[Any] = fft_window_size lowerCAmelCase : Any = (fft_window_size >> 1) + 1 lowerCAmelCase : Union[str, Any] = hop_length lowerCAmelCase : int = max_length_s lowerCAmelCase : str = max_length_s * sampling_rate lowerCAmelCase : Any = sampling_rate lowerCAmelCase : str = frequency_min lowerCAmelCase : List[str] = frequency_max lowerCAmelCase : Optional[int] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='''htk''' , ) lowerCAmelCase : Union[str, Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) lowerCAmelCase : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCamelCase__ ( self : Any , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ): lowerCAmelCase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='''dB''' , ) return log_mel_spectrogram.T def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Any ): lowerCAmelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase : Union[str, Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase : List[Any] = [0] # randomly choose index for each part lowerCAmelCase : Dict = np.random.choice(ranges[0] ) lowerCAmelCase : str = np.random.choice(ranges[1] ) lowerCAmelCase : List[Any] = np.random.choice(ranges[2] ) lowerCAmelCase : Any = mel[idx_front : idx_front + 
chunk_frames, :] lowerCAmelCase : int = mel[idx_middle : idx_middle + chunk_frames, :] lowerCAmelCase : Optional[int] = mel[idx_back : idx_back + chunk_frames, :] lowerCAmelCase : List[Any] = torch.tensor(mel[None, None, :] ) lowerCAmelCase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = mel_shrink[0][0].numpy() lowerCAmelCase : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCAmelCase : Optional[int] = len(UpperCamelCase_ ) - max_length lowerCAmelCase : List[str] = np.random.randint(0 , overflow + 1 ) lowerCAmelCase : List[Any] = waveform[idx : idx + max_length] lowerCAmelCase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCAmelCase : List[str] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) lowerCAmelCase : Union[str, Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCAmelCase : Union[str, Any] = np.stack([mel, mel, mel, mel] , axis=0 ) lowerCAmelCase : Union[str, Any] = False else: lowerCAmelCase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: lowerCAmelCase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCAmelCase : Optional[Any] = int(max_length / len(UpperCamelCase_ ) ) lowerCAmelCase : Optional[int] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCAmelCase : Any = int(max_length / len(UpperCamelCase_ ) ) lowerCAmelCase : Tuple = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Any = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": lowerCAmelCase : Optional[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) lowerCAmelCase : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Optional[int] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Optional[Any] , ): lowerCAmelCase : Optional[int] = truncation if truncation is not None else self.truncation lowerCAmelCase : str = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowerCAmelCase : Tuple = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCAmelCase : Any = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): lowerCAmelCase : List[str] = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase : Tuple = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
lowerCAmelCase : Union[str, Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] lowerCAmelCase : str = [] lowerCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCAmelCase : List[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = True if isinstance(input_mel[0] , UpperCamelCase_ ): lowerCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCAmelCase : Any = [[longer] for longer in is_longer] lowerCAmelCase : str = {'''input_features''': input_mel, '''is_longer''': is_longer} lowerCAmelCase : int = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: lowerCAmelCase : Dict = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
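# --- Usage sketch (not part of the original file) -----------------------------
# A hedged example of the extractor above on a synthetic 3-second 440 Hz tone
# (an illustrative assumption). The clip is shorter than max_length_s, so it
# takes the "repeatpad" padding path, and the default "fusion" truncation stacks
# four copies of the mel spectrogram, giving features of shape
# (batch, 4, frames, 64) with is_longer == [[False]].
extractor = ClapFeatureExtractor()
t = np.arange(3 * 48_000) / 48_000
waveform = np.sin(2 * np.pi * 440 * t).astype(np.float64)
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)
print(features["is_longer"])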
"""Tests for saving and loading training state with Accelerate."""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed

logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders over data drawn from y = a*x + b + noise"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`, returning a list of random draws to compare resumed RNG state"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model computing y = a*x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
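# --- Usage sketch (not part of the original test file) ------------------------
# A minimal sketch of the save/load cycle these tests exercise, assuming a
# single CPU process: checkpoint, perturb a parameter, reload, and confirm the
# saved value comes back.
def smoke_checkpoint_cycle():
    accelerator = Accelerator()
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    with tempfile.TemporaryDirectory() as tmpdir:
        accelerator.save_state(tmpdir)
        before = model.a.item()
        with torch.no_grad():
            model.a.add_(1.0)  # perturb the parameter
        accelerator.load_state(tmpdir)
        assert model.a.item() == before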