Dataset columns:

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 or 1) |
def set_bit(number: int, position: int) -> int:
    '''Set the bit at `position` of `number` to 1.'''
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    '''Set the bit at `position` of `number` to 0.'''
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    '''Flip the bit at `position` of `number`.'''
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    '''Return True if the bit at `position` of `number` is set.'''
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    '''Return the bit (0 or 1) at `position` of `number`.'''
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
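# A minimal usage sketch (not part of the original snippet): the helpers
# operate on the binary representation of an integer, e.g. 0b1101 == 13.
# set_bit(0b1101, 1)    -> 0b1111 == 15
# clear_bit(0b1101, 2)  -> 0b1001 == 9
# flip_bit(0b1101, 1)   -> 0b1111 == 15
# is_bit_set(0b1101, 0) -> True
# get_bit(0b1101, 1)    -> 0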
[code_codestyle: 30]
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    '''Parse command-line arguments for the generation script.'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How many images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows: int, cols: int):
    '''Paste a list of images into a single rows x cols grid image.'''
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt='robotic cat with wings', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    '''Run the pipeline and arrange the outputs into a near-square grid.'''
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so outputs are never filtered
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # Load an INT8 UNet quantized with Intel Neural Compressor
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
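# Example invocation (illustrative; the script filename and model path are
# placeholders, not from the original file):
#   python run_sd_inference.py -m ./stable-diffusion-model -c "robotic cat with wings" -n 4 -s 42
# The grid image and the individual PNGs are written under the model directory.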
[style_context_codestyle: 30 | label: 1]
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    '''Solve the maze by backtracking and print the path found, if any.'''
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    '''Recursively explore the maze, marking the path taken in `solutions`.'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
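# A minimal usage sketch (not part of the original snippet). Here 0 marks a
# free cell and 1 marks a wall, so this 3x3 maze is solvable along the border:
# maze = [[0, 1, 0], [0, 1, 0], [0, 0, 0]]
# solve_maze(maze)  # prints the solution grid and returns True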
[code_codestyle: 90]
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
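# Quick sanity checks (illustrative, not in the original file):
# is_punctuation("!")  -> True  (ord 33 falls inside the first ASCII range)
# is_punctuation(",")  -> True  (Unicode category "Po")
# is_punctuation("a")  -> False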
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
[style_context_codestyle: 90 | label: 1]
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''Find a root of `function` with the secant method, starting from x0 and x1.'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
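# For reference (a hand-checked value, not in the original snippet): the real
# root of x**3 - 2*x - 5 is approximately 2.0945515, so the printed result
# should match it to within the 1e-5 stopping tolerance of the secant loop.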
[code_codestyle: 36]
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
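# Worked example (hand-computed, not in the original file): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio multiplies the
# strides together, giving 5 * 2**6 == 320, i.e. one logit frame per 320
# input samples (20 ms of 16 kHz audio).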
[style_context_codestyle: 36 | label: 1]
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = "" , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase : List[str] = is_leaf
lowerCAmelCase : Any = prefix
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = 0
for q, w in zip(self.prefix , UpperCamelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for word in words:
self.insert(UpperCamelCase__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.prefix == word:
lowerCAmelCase : Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase : Union[str, Any] = RadixNode(prefix=UpperCamelCase__ , is_leaf=UpperCamelCase__ )
else:
lowerCAmelCase : Optional[int] = self.nodes[word[0]]
lowerCAmelCase : Dict = incoming_node.match(
UpperCamelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCamelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase : str = remaining_prefix
lowerCAmelCase : str = self.nodes[matching_string[0]]
lowerCAmelCase : Union[str, Any] = RadixNode(UpperCamelCase__ , UpperCamelCase__ )
lowerCAmelCase : Any = aux_node
if remaining_word == "":
lowerCAmelCase : Dict = True
else:
self.nodes[matching_string[0]].insert(UpperCamelCase__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.nodes.get(word[0] , UpperCamelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase : Optional[int] = incoming_node.match(
UpperCamelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCamelCase__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.nodes.get(word[0] , UpperCamelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase : Tuple = incoming_node.match(
UpperCamelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCamelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase : Optional[int] = list(self.nodes.values() )[0]
lowerCAmelCase : Any = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase : Any = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase : str = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase : int = list(incoming_node.nodes.values() )[0]
lowerCAmelCase : List[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase : str = merging_node.nodes
return True
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
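# Illustrative tree shape (hand-traced, not printed by the original file):
# after inserting "banana", "bananas" and "band", the root holds one child
# "ban", which splits into "ana" (a leaf with child "s") and "d" (a leaf),
# so each shared prefix is stored exactly once.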
[code_codestyle: 370]
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
[style_context_codestyle: 133 | label: 0]
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
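# Example usage (illustrative; requires a real personal-access token in the
# environment):
#   export USER_TOKEN=<your token>
#   python fetch_github_info.py
# prints one "key: value" line per field of the authenticated-user response.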
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
        raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')

[code_codestyle: 331]
'''simple docstring'''
def one_pence() -> int:
    """One pence can be made in exactly one way."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make x pence from coins of value at most 2."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make x pence from coins of value at most 5."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make x pence from coins of value at most 10."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make x pence from coins of value at most 20."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make x pence from coins of value at most 50."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make x pence from coins of value at most 100."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make x pence from coins of value at most 200."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways x pence can be made from the eight British coins."""
    return two_pound(x)
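# For reference (a hand-checked value, not in the original snippet):
# solution(200) == 73682, the number of ways to make 2 pounds from the eight
# British coins, which is the answer to Project Euler problem 31.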
if __name__ == "__main__":
    print(solution(int(input().strip())))

[style_context_codestyle: 331 | label: 1]
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
    return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
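# Worked example (hand-checked, not in the original file): next_divisible
# rounds n down to the nearest multiple of d, e.g. next_divisible(100, 8)
# computes 100 % 8 == 4 and returns 96.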
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350, ):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="L", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32, ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
[code_codestyle: 350]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = ["DeiTFeatureExtractor"]
SCREAMING_SNAKE_CASE : Dict = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
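# A minimal usage sketch (illustrative, not part of the original file): with
# the lazy module installed in sys.modules, imports work as usual and the
# heavy submodules are only loaded on first attribute access, e.g.
#   from transformers import DeiTConfig
#   config = DeiTConfig()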
[style_context_codestyle: 84 | label: 0]
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet", )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond", )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
[code_codestyle: 304]
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
[style_context_codestyle: 304 | label: 1]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[code_codestyle: 53]
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = dct.pop(_UpperCAmelCase )
lowercase : Tuple = val
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase : Optional[int] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
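# the original checkpoint stores only q and v biases (the key projection has
# none), so a zero vector fills the k slot of the fused qkv bias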
lowercase : List[Any] = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase ), v_bias) )
lowercase : Optional[Any] = qkv_bias
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] = 3_64 if 'coco' in model_name else 2_24
lowercase : int = BlipaVisionConfig(image_size=_UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# flan-T5 models don't seem to ship with a properly set bos_token_id, so it is set explicitly below
if "opt-2.7b" in model_name:
lowercase : Optional[int] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowercase : List[str] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "t5-xl" in model_name:
lowercase : int = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowercase : int = BlipaConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase )
return config, image_size
@torch.no_grad()
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowercase : Any = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowercase : Any = tokenizer('\n' , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase , lowercase : Union[str, Any] = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase : Any = BlipaForConditionalGeneration(_UpperCAmelCase ).eval()
lowercase : Any = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowercase , lowercase : Optional[int] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
lowercase , lowercase , lowercase : List[str] = load_model_and_preprocess(
name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase : int = original_model.state_dict()
lowercase : str = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# the remaining keys are renamed with simple substring replacements
for key, val in state_dict.copy().items():
lowercase : Dict = state_dict.pop(_UpperCAmelCase )
if key.startswith('Qformer.bert' ):
lowercase : List[Any] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowercase : List[Any] = key.replace('self' , 'attention' )
if "opt_proj" in key:
lowercase : Any = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
lowercase : List[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
lowercase : Optional[Any] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
lowercase : Optional[Any] = key.replace('t5' , 'language' )
lowercase : Tuple = val
# read in qv biases
read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase )
lowercase , lowercase : str = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert len(_UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase : List[Any] = load_demo_image()
lowercase : Optional[Any] = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
lowercase : str = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
# create processor
lowercase : List[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase )
lowercase : Union[str, Any] = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
lowercase : Tuple = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
original_model.to(_UpperCAmelCase )
hf_model.to(_UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
lowercase : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowercase : str = hf_model(_UpperCAmelCase , _UpperCAmelCase ).logits
else:
lowercase : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowercase : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowercase : Tuple = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase : str = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=_UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase : Any = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=_UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
else:
# cast to same type
lowercase : Dict = logits.dtype
assert torch.allclose(original_logits.to(_UpperCAmelCase ) , _UpperCAmelCase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase : str = ''
lowercase : List[str] = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
lowercase : Any = original_model.generate({'image': original_pixel_values} )
lowercase : Union[str, Any] = hf_model.generate(
_UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _UpperCAmelCase )
lowercase : str = input_ids.shape[1]
lowercase : Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase )
lowercase : Optional[int] = [text.strip() for text in output_text]
print('HF generation:' , _UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCamelCase: Optional[Any] = argparse.ArgumentParser()
_UpperCamelCase: Dict = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_UpperCamelCase: int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 53 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCamelCase_ ( _a : Optional[Any] ):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
UpperCAmelCase_ : Optional[int] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
UpperCAmelCase_ : List[str] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
UpperCAmelCase_ : Any = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
UpperCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
UpperCAmelCase_ : List[Any] = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
UpperCAmelCase_ : Optional[Any] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
UpperCAmelCase_ : List[str] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
UpperCAmelCase_ : Tuple = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
UpperCAmelCase_ : Any = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
UpperCAmelCase_ : int = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
UpperCAmelCase_ : List[Any] = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
UpperCAmelCase_ : str = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
UpperCAmelCase_ : List[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
UpperCAmelCase_ : List[Any] = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
UpperCAmelCase_ : str = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
UpperCAmelCase_ : Optional[Any] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
UpperCAmelCase_ : Optional[int] = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
UpperCAmelCase_ : Tuple = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
UpperCAmelCase_ : str = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def lowerCamelCase_ ( _a : Optional[Any] , _a : Dict ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : str = orig_state_dict.pop(_a )
if "qkv" in key:
# weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ : Tuple = key.split(""".""" )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = int(key_split[2] ), int(key_split[4] )
UpperCAmelCase_ : int = config.vision_config.hidden_size
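# the fused qkv projection stacks query, key and value along dim 0, so rows
# [:dim], [dim : 2*dim] and [-dim:] split back into the separate q, k, v tensors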
if "weight" in key:
UpperCAmelCase_ : Tuple = val[:dim, :]
UpperCAmelCase_ : Dict = val[dim : dim * 2, :]
UpperCAmelCase_ : Optional[Any] = val[-dim:, :]
else:
UpperCAmelCase_ : Tuple = val[:dim]
UpperCAmelCase_ : str = val[dim : dim * 2]
UpperCAmelCase_ : Optional[Any] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ : List[Any] = key.split(""".""" )
UpperCAmelCase_ : int = int(key_split[3] )
UpperCAmelCase_ : int = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Dict = val[:dim, :]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : Optional[Any] = val[:dim]
UpperCAmelCase_ : Optional[int] = val[dim : dim * 2]
UpperCAmelCase_ : int = val[-dim:]
else:
UpperCAmelCase_ : List[str] = rename_key(_a )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
UpperCAmelCase_ : Tuple = val.squeeze_()
else:
UpperCAmelCase_ : int = val
return orig_state_dict
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _a : Optional[Any] , _a : str , _a : List[str]="groupvit-gcc-yfcc" , _a : Tuple=False ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = GroupViTConfig()
UpperCAmelCase_ : Union[str, Any] = GroupViTModel(_a ).eval()
UpperCAmelCase_ : str = torch.load(_a , map_location="""cpu""" )["""model"""]
UpperCAmelCase_ : List[str] = convert_state_dict(_a , _a )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_a ) == 0)
# verify result
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : Optional[int] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=_a , padding=_a , return_tensors="""pt""" )
with torch.no_grad():
UpperCAmelCase_ : int = model(**_a )
if model_name == "groupvit-gcc-yfcc":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
UpperCAmelCase_ : str = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , _a , atol=1E-3 )
processor.save_pretrained(_a )
model.save_pretrained(_a )
print("""Successfully saved processor and model to""" , _a )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(_a , organization="""nielsr""" )
model.push_to_hub(_a , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
UpperCamelCase_ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
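# Map every byte value (0-255) to a printable unicode character so BPE can
# operate on strings that round-trip losslessly; whitespace/control bytes the
# BPE code would mishandle are shifted into the 256+ range.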
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_a )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ : Any = [chr(_a ) for n in cs]
return dict(zip(_a , _a ) )
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
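# Return the set of adjacent symbol pairs in a word, e.g. ("h", "e"), ("e", "l"), ...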
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
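# greedily apply the highest-priority merge: bpe_ranks maps a pair to its
# merge order, so the pair with the smallest rank is merged first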
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 345 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
A : Any = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def UpperCamelCase ( __magic_name__ : str = "dhaka" , __magic_name__ : int = 5 ) -> int:
"""simple docstring"""
lowercase__ = min(__magic_name__ , 50 ) # Prevent abuse!
lowercase__ = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
lowercase__ = requests.get("""https://www.google.com/search""" , params=__magic_name__ , headers=__magic_name__ )
lowercase__ = BeautifulSoup(html.text , """html.parser""" )
lowercase__ = """""".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
lowercase__ = json.dumps(__magic_name__ )
lowercase__ = json.loads(__magic_name__ )
lowercase__ = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , __magic_name__ , )
if not matched_google_image_data:
return 0
lowercase__ = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(__magic_name__ ) , )
lowercase__ = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , __magic_name__ , )
for index, fixed_full_res_image in enumerate(__magic_name__ ):
if index >= max_images:
return index
lowercase__ = bytes(__magic_name__ , """ascii""" ).decode(
"""unicode-escape""" )
lowercase__ = bytes(__magic_name__ , """ascii""" ).decode(
"""unicode-escape""" )
lowercase__ = urllib.request.build_opener()
lowercase__ = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(__magic_name__ )
lowercase__ = f'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(__magic_name__ ):
os.makedirs(__magic_name__ )
urllib.request.urlretrieve( # noqa: S310
__magic_name__ , f'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
A : List[Any] = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('Please provide a search term.')
raise
| 146 |
from functools import lru_cache
def UpperCamelCase ( __magic_name__ : int ) -> set:
"""Return the set of distinct prime factors of n, found by trial division."""
lowercase__ = 2
lowercase__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
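# e.g. unique_prime_factors(100) == {2, 5}, since 100 = 2**2 * 5**2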
@lru_cache
def UpperCamelCase ( __magic_name__ : int ) -> int:
"""Return the number of distinct prime factors of n (memoized via lru_cache)."""
return len(unique_prime_factors(__magic_name__ ) )
def UpperCamelCase ( __magic_name__ : list ) -> bool:
"""Return True when all elements of the list are equal (vacuously True when empty)."""
return len(set(__magic_name__ ) ) in (0, 1)
def UpperCamelCase ( __magic_name__ : int ) -> list:
"""Return the first run of n consecutive integers that each have exactly n distinct prime factors."""
lowercase__ = 2
while True:
# Increment each value of a generated range
lowercase__ = [base + i for i in range(__magic_name__ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowercase__ = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def UpperCamelCase ( __magic_name__ : int = 4 ) -> int:
"""Return the first of n consecutive integers with n distinct prime factors (Project Euler 47 for n=4)."""
lowercase__ = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
| 146 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase( __a ):
'''simple docstring'''
def __init__( self: str, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_snake_case : int = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self: Optional[int], a_: int = 1, a_: Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_: float = 0.0, a_: int = 50, a_: Optional[bool] = None, a_: Optional[str] = "pil", a_: bool = True, ):
'''simple docstring'''
if isinstance(self.unet.config.sample_size, a_ ):
_snake_case : Optional[Any] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_snake_case : Dict = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(a_, a_ ) and len(a_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(a_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_snake_case : Dict = randn_tensor(a_, generator=a_, device=self.device, dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_snake_case : List[Any] = self.unet(a_, a_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_snake_case : Optional[int] = self.scheduler.step(
a_, a_, a_, eta=a_, use_clipped_model_output=a_, generator=a_ ).prev_sample
_snake_case : List[Any] = (image / 2 + 0.5).clamp(0, 1 )
_snake_case : Union[str, Any] = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
_snake_case : str = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
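# A minimal usage sketch of this pipeline (it mirrors diffusers' DDIMPipeline;
# the checkpoint name below is an illustrative assumption, not required here):
#
#     from diffusers import DDIMPipeline
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")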
| 64 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training; it builds on the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : List[Any] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : str = 16
elif accelerator.mixed_precision != "no":
_snake_case : Optional[int] = 8
else:
_snake_case : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
_snake_case : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
_snake_case : List[Any] = 2
# Initialize accelerator
_snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config["""lr"""]
_snake_case : str = int(config["""num_epochs"""] )
_snake_case : Union[str, Any] = int(config["""seed"""] )
_snake_case : Union[str, Any] = int(config["""batch_size"""] )
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
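# `find_executable_batch_size` calls the decorated function with
# `starting_batch_size`; whenever a CUDA out-of-memory error is raised it
# halves the batch size and retries until training fits in memory.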
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ )
_snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
_snake_case : str = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**snake_case__ )
_snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_snake_case : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. """
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_snake_case : Dict = parser.parse_args()
_snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 64 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class __A ( torch.nn.Module ):
def __init__(self : Tuple , __a : List[Any]="sayef/fsner-bert-base-uncased" ):
super(__a , self ).__init__()
UpperCAmelCase_ = AutoModel.from_pretrained(__a , return_dict=__a )
UpperCAmelCase_ = torch.nn.CosineSimilarity(3 , 1E-08 )
UpperCAmelCase_ = torch.nn.Softmax(dim=1 )
def _lowercase (self : int , **__a : List[str] ):
return self.bert(**__a ).last_hidden_state
def _lowercase (self : Union[str, Any] , __a : Tuple ):
return token_embeddings.sum(2 , keepdim=__a )
def _lowercase (self : Optional[Any] , __a : Optional[int] , __a : Tuple , __a : int=1 ):
return self.softmax(T * self.cos(__a , __a ) )
def _lowercase (self : int , __a : Union[str, Any] , __a : Optional[Any] ):
UpperCAmelCase_ = W_supports["sizes"].tolist()
UpperCAmelCase_ = W_supports["start_token_id"].item()
UpperCAmelCase_ = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCAmelCase_ = self.BERT(**__a )
UpperCAmelCase_ = self.BERT(**__a )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = W_supports["input_ids"] == start_token_id
UpperCAmelCase_ = W_supports["input_ids"] == end_token_id
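# boolean masks marking where the entity start/end marker tokens occur in the
# support inputs; the encoder states at these positions are matched against
# the query tokens below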
for i, size in enumerate(__a ):
if i == 0:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = support_sizes[i - 1]
UpperCAmelCase_ = S[s : s + size][start_token_masks[s : s + size]]
UpperCAmelCase_ = S[s : s + size][end_token_masks[s : s + size]]
UpperCAmelCase_ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCAmelCase_ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCAmelCase_ = torch.vstack((p_starts, p_start) )
UpperCAmelCase_ = torch.vstack((p_ends, p_end) )
else:
UpperCAmelCase_ = p_start
UpperCAmelCase_ = p_end
return p_starts, p_ends
| 106 | '''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
SCREAMING_SNAKE_CASE_: Tuple =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={'facebook/bart-base': BartForConditionalGeneration}
SCREAMING_SNAKE_CASE_: int ={'facebook/bart-base': BartTokenizer}
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=snake_case_ , default=snake_case_ , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=snake_case_ , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=snake_case_ , default=snake_case_ , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=snake_case_ , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , )
parser.add_argument(
"--config_name" , type=snake_case_ , default=snake_case_ , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=snake_case_ , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=snake_case_ , default=snake_case_ , help="Where to store the final ONNX file." )
UpperCAmelCase_ = parser.parse_args()
return args
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : int="cpu" ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = model_dict[model_name].from_pretrained(snake_case_ ).to(snake_case_ )
UpperCAmelCase_ = tokenizer_dict[model_name].from_pretrained(snake_case_ )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
return huggingface_model, tokenizer
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Dict ) -> Dict:
'''simple docstring'''
model.eval()
UpperCAmelCase_ = None
UpperCAmelCase_ = torch.jit.script(BARTBeamSearchGenerator(snake_case_ ) )
with torch.no_grad():
UpperCAmelCase_ = "My friends are cool but they eat too many carbs."
UpperCAmelCase_ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors="pt" ).to(model.device )
UpperCAmelCase_ = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=snake_case_ , max_length=snake_case_ , early_stopping=snake_case_ , decoder_start_token_id=model.config.decoder_start_token_id , )
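# export the scripted beam-search wrapper; dynamic_axes below let the batch
# and sequence dimensions vary at inference time instead of being baked in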
torch.onnx.export(
snake_case_ , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case_ , opset_version=14 ,
input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] ,
output_names=["output_ids"] ,
dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=snake_case_ , )
logger.info("Model exported to {}".format(snake_case_ ) )
UpperCAmelCase_ = remove_dup_initializers(os.path.abspath(snake_case_ ) )
logger.info("Deduplicated and optimized model written to {}".format(snake_case_ ) )
UpperCAmelCase_ = onnxruntime.InferenceSession(snake_case_ )
UpperCAmelCase_ = ort_sess.run(
snake_case_ , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(snake_case_ ),
"max_length": np.array(snake_case_ ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
UpperCAmelCase_ = parse_args()
UpperCAmelCase_ = 5
UpperCAmelCase_ = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ = torch.device(args.device )
UpperCAmelCase_ , UpperCAmelCase_ = load_model_tokenizer(args.model_name_or_path , snake_case_ )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(snake_case_ )
if args.max_length:
UpperCAmelCase_ = args.max_length
if args.num_beams:
UpperCAmelCase_ = args.num_beams
if args.output_file_path:
UpperCAmelCase_ = args.output_file_path
else:
UpperCAmelCase_ = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
| 106 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowercase ( snake_case_ : List[str] ,snake_case_ : int ,snake_case_ : Dict=1e-12 ) ->Any:
'''simple docstring'''
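# L2-normalize both batches of embeddings (with clipping for numerical
# stability), then a matmul yields the pairwise cosine similarities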
__A : str = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(snake_case_ ,axis=1 ) ,a_min=snake_case_ ) ).T
__A : List[Any] = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(snake_case_ ,axis=1 ) ,a_min=snake_case_ ) ).T
return jnp.matmul(snake_case_ ,norm_emb_a.T )
class __snake_case ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = jnp.floataa
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config )
__A : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__A : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__A : str = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__A : Union[str, Any] = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
__A : str = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Any = self.vision_model(__lowerCamelCase )[1]
__A : Union[str, Any] = self.visual_projection(__lowerCamelCase )
__A : Dict = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__A : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__A : Any = 0.0
__A : str = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__A : Optional[int] = jnp.round(__lowerCamelCase , 3 )
__A : Union[str, Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__A : Union[str, Any] = is_special_care * 0.0_1
__A : Dict = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__A : str = jnp.round(__lowerCamelCase , 3 )
__A : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = """clip_input"""
_lowerCamelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = jnp.floataa , __lowerCamelCase = True , **__lowerCamelCase , ):
'''simple docstring'''
if input_shape is None:
__A : Any = (1, 224, 224, 3)
__A : Optional[int] = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : List[Any] = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__A , __A : Optional[int] = jax.random.split(__lowerCamelCase )
__A : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng}
__A : Any = self.module.init(__lowerCamelCase , __lowerCamelCase )['''params''']
return random_params
def __call__( self , __lowerCamelCase , __lowerCamelCase = None , ):
'''simple docstring'''
__A : Optional[int] = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 179 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """facebook/bart-large-mnli"""
_lowerCamelCase = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
_lowerCamelCase = """text_classifier"""
_lowerCamelCase = AutoTokenizer
_lowerCamelCase = AutoModelForSequenceClassification
_lowerCamelCase = ["""text""", ["""text"""]]
_lowerCamelCase = ["""text"""]
def UpperCamelCase__( self ):
'''simple docstring'''
super().setup()
__A : List[str] = self.model.config
__A : int = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__A : List[str] = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = outputs.logits
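# for bart-large-mnli the entailment class sits at index 2, so column 2 holds
# the entailment logit of each (text, "This example is {label}") pair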
__A : List[str] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 179 | 1 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
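# --- Editor's sketch (not part of the original test file): the alignment helper in
# isolation, outside unittest. The stage names here are arbitrary examples.
if __name__ == "__main__":
    feats, idxs = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
    print(feats, idxs)  # expected: ['stage2'] [2]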
| 274 |
"""Implementation of a NOR gate: the output is 1 only when every input is 0."""


def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
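# --- Editor's addition (illustration, not from the source): NOR is functionally
# complete, so NOT and OR can be rebuilt from nor_gate alone.
def not_gate_via_nor(a: int) -> int:
    return nor_gate(a, a)


def or_gate_via_nor(a: int, b: int) -> int:
    return not_gate_via_nor(nor_gate(a, b))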
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 274 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
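# --- Editor's usage sketch (not in the original file); "camembert-base" is the
# checkpoint already referenced in PRETRAINED_VOCAB_FILES_MAP above.
if __name__ == "__main__":
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    print(tokenizer.tokenize("J'aime le camembert !"))
    print(tokenizer.encode("J'aime le camembert !"))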
| 33 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between the question-answering/sequence-classification
        # heads and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
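# --- Editor's sketch (assumption, not from the source file): constructing the config
# and inspecting the ONNX input spec for a given export task.
if __name__ == "__main__":
    config = LayoutLMv3Config()
    onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
    print(onnx_config.inputs)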
| 274 | 0 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
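# --- Editor's usage sketch (not in the original file), mirroring the docstring examples.
if __name__ == "__main__":
    matthews_metric = datasets.load_metric("matthews_correlation")
    results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
    print(round(results["matthews_correlation"], 2))  # 0.54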
| 355 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x_num/x_den + y_num/y_den + z_num/z_den as a reduced (numerator, denominator) pair."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
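# Editor's worked example (not in the source): add_three(1, 2, 1, 3, 1, 6) evaluates
# 1/2 + 1/3 + 1/6 -> top = 18 + 12 + 6 = 36, bottom = 36, gcd = 36, so it returns (1, 1).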
def solution(order: int = 35) -> int:
    """
    Search all fractions x, y, z up to the given order, collect the unique reduced sums
    s = x + y + z found for exponents n in {1, 2, -1, -2}, and return the numerator plus
    the denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 0 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
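# --- Editor's addition (alternative technique, not in the original file): Floyd's
# tortoise-and-hare check finds a cycle in O(1) extra space instead of the O(n)
# visited list used by __iter__ above.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:
            return True
    return False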
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 38 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute the latent grid size for a requested image size: ceil(dim / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
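# Editor's worked example (not in the source): downscale_height_and_width(768, 768, 8)
# gives 768 // 8**2 = 12 per axis with no remainder, so the latent grid is (96, 96).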
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 58 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search; append each vertex after all of its descendants are explored."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: order vertices by finish time, then explore the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
| 317 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 317 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return x unchanged if it is iterable, otherwise duplicate it into a 2-tuple."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
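# Editor's note (examples, not in the source): to_atuple(7) -> (7, 7); to_atuple((3, 4)) -> (3, 4).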
@require_flax
class VisionTextDualEncoderMixin:
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
pass
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
pass
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
lowerCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
lowerCamelCase_ , lowerCamelCase_ = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {'vision_model': vision_model, 'text_model': text_model}
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {'vision_model': vision_model, 'text_model': text_model}
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = model(
input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = output.vision_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase_ = to_atuple(vision_model.config.image_size )
lowerCamelCase_ = to_atuple(vision_model.config.patch_size )
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase_ = output.text_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
pt_model.to(__SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
lowerCamelCase_ = inputs_dict
lowerCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase_ = pt_model(**__SCREAMING_SNAKE_CASE ).to_tuple()
lowerCamelCase_ = fx_model(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE , from_pt=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = fx_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = VisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE , from_flax=__SCREAMING_SNAKE_CASE )
pt_model_loaded.to(__SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase_ = pt_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2 )
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
lowerCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = fx_state
self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
lowerCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : int ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any ) -> str:
lowerCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Dict ) -> Tuple:
lowerCamelCase_ = self.prepare_config_and_inputs()
self.check_save_load(**__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[str] ) -> Tuple:
lowerCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def UpperCamelCase ( self : List[str] ) -> List[Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ = config_inputs_dict.pop('vision_config' )
lowerCamelCase_ = config_inputs_dict.pop('text_config' )
lowerCamelCase_ = config_inputs_dict
self.check_equivalence_pt_to_flax(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_equivalence_flax_to_pt(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    @slow
    def test_pretrained_model_outputs(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def UpperCamelCase ( self : Any ) -> Dict:
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__SCREAMING_SNAKE_CASE , text_from_pt=__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = 13
lowerCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCamelCase_ = random_attention_mask([batch_size, 4] )
lowerCamelCase_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
lowerCamelCase_ = FlaxViTModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxBertModel(__SCREAMING_SNAKE_CASE )
return vision_model, text_model
def UpperCamelCase ( self : Union[str, Any] ) -> Dict:
lowerCamelCase_ = FlaxViTModelTester(self )
lowerCamelCase_ = FlaxBertModelTester(self )
lowerCamelCase_ = vit_model_tester.prepare_config_and_inputs()
lowerCamelCase_ = bert_model_tester.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ = vision_config_and_inputs
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def UpperCamelCase ( self : int ) -> int:
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__SCREAMING_SNAKE_CASE , text_from_pt=__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = 13
lowerCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCamelCase_ = random_attention_mask([batch_size, 4] )
lowerCamelCase_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
lowerCamelCase_ = FlaxCLIPVisionModel(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FlaxBertModel(__SCREAMING_SNAKE_CASE )
return vision_model, text_model
def UpperCamelCase ( self : int ) -> List[Any]:
lowerCamelCase_ = FlaxCLIPVisionModelTester(self )
lowerCamelCase_ = FlaxBertModelTester(self )
lowerCamelCase_ = clip_model_tester.prepare_config_and_inputs()
lowerCamelCase_ = bert_model_tester.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ = vision_config_and_inputs
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def UpperCamelCase ( self : Optional[int] ) -> Tuple:
lowerCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
lowerCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='np' )
lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCamelCase_ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 183 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts( self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
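        # (Added note: WordPiece is greedy longest-match-first over the vocab, and a word
        # containing any out-of-vocabulary piece collapses to '[UNK]' as a whole, which is
        # why 'unwantedX' above does not fall back to 'un', '##want', '##ed'.)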
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 183 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
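# (Added note: `collate_fn` above pads each batch only to its longest member -- dynamic
# padding -- except on TPU, where static shapes are strongly preferred, hence the fixed
# max_length=128.)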
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
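# A minimal launch sketch (added for illustration; the filename is assumed, the flags are
# the ones defined by the parser above):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16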
| 357 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
A_ : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
A_ : List[str] =parser.parse_args()
A_ : Any =download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
    pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
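# A minimal invocation sketch (added for illustration; the script filename is assumed,
# the flags are the ones defined above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --extract_ema --half \
#       --dump_path ./stable-diffusion-v1-5-diffusers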
| 80 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class WavaVecaFeatureExtractionTester ( unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=1 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp(self ):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance(self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0) - 1) < 1E-3))
def _lowercase (self : Union[str, Any]) -> List[str]:
__snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__snake_case : Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
__snake_case : Dict = [np.asarray(_A) for speech_input in speech_inputs]
# Test not batched input
__snake_case : List[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
__snake_case : Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3))
# Test batched
__snake_case : Optional[Any] = feat_extract(_A , return_tensors='np').input_values
__snake_case : str = feat_extract(_A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(_A , _A):
self.assertTrue(np.allclose(_A , _A , atol=1E-3))
# Test 2-D numpy arrays are batched.
__snake_case : Union[str, Any] = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
__snake_case : str = np.asarray(_A)
__snake_case : Union[str, Any] = feat_extract(_A , return_tensors='np').input_values
__snake_case : Tuple = feat_extract(_A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(_A , _A):
self.assertTrue(np.allclose(_A , _A , atol=1E-3))
def _lowercase (self : Optional[int]) -> List[Any]:
__snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__snake_case : List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
__snake_case : int = ["longest", "max_length", "do_not_pad"]
__snake_case : List[Any] = [None, 16_00, None]
for max_length, padding in zip(_A , _A):
__snake_case : Optional[Any] = feat_extract(_A , padding=_A , max_length=_A , return_tensors='np')
__snake_case : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowercase (self : Tuple) -> Union[str, Any]:
__snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__snake_case : List[Any] = range(8_00 , 14_00 , 2_00)
__snake_case : List[str] = [floats_list((1, x))[0] for x in lengths]
__snake_case : Optional[int] = ["longest", "max_length", "do_not_pad"]
__snake_case : List[Any] = [None, 16_00, None]
for max_length, padding in zip(_A , _A):
__snake_case : int = feat_extract(_A , max_length=_A , padding=_A)
__snake_case : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowercase (self : Any) -> Dict:
__snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__snake_case : Optional[int] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
__snake_case : int = feat_extract(
_A , truncation=_A , max_length=10_00 , padding='max_length' , return_tensors='np')
__snake_case : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def _lowercase (self : Dict) -> List[str]:
__snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__snake_case : str = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
__snake_case : List[Any] = feat_extract(
_A , truncation=_A , max_length=10_00 , padding='longest' , return_tensors='np')
__snake_case : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
__snake_case : List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
__snake_case : Tuple = feat_extract(
_A , truncation=_A , max_length=20_00 , padding='longest' , return_tensors='np')
__snake_case : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def _lowercase (self : Optional[Any]) -> int:
import torch
__snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__snake_case : Any = np.random.rand(1_00).astype(np.floataa)
__snake_case : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__snake_case : str = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
__snake_case : int = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def _lowercase (self : Dict) -> Optional[Any]:
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
__snake_case : List[str] = WavaVecaConfig.from_pretrained(_A)
__snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(_A)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
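# A standalone sketch (added for illustration; it mirrors the normalization the checks
# above verify, under the assumption that it matches the feature extractor's
# zero-mean/unit-variance norm):
def _zero_mean_unit_var_sketch(values):
    # normalize a single sequence to zero mean and unit variance
    arr = np.asarray(values , dtype=np.float32 )
    return (arr - arr.mean()) / np.sqrt(arr.var() + 1E-7 )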
| 172 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
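# A minimal sketch (added for illustration of the rotary position trick that gives
# RoFormer its name; this is not code from the original tests): consecutive feature
# pairs of queries/keys are rotated by position-dependent angles before attention.
def _apply_rotary_sketch(x , sin , cos ):
    # x: (..., 2*d); sin/cos: (..., d), one angle per feature pair
    x_even , x_odd = x[..., 0::2] , x[..., 1::2]
    rotated = np.stack([x_even * cos - x_odd * sin , x_odd * cos + x_even * sin] , axis=-1 )
    return rotated.reshape(x.shape )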
| 148 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple =logging.get_logger(__name__)
_A : Tuple ={
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig ( PretrainedConfig ):
    model_type = """sew-d"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
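# (Added note: the property above is the overall input-to-output downsampling factor of
# the conv feature encoder, i.e. the product of all conv strides; with the default strides
# that is functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320.)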
| 129 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError ( Exception ):
    pass
class Node :
    def __init__( self , data: Any ):
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ):
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
    print(root_node.has_loop) # False
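# An O(1)-space alternative sketch (added; not part of the original file): Floyd's
# tortoise-and-hare cycle detection can replace the `visited` list used by `__iter__`.
def has_loop_floyd(head: Node ) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False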
| 129 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def snake_case_ ( lowerCAmelCase_ : int="" ):
__lowercase : str = tempfile.mkdtemp()
return os.path.join(lowerCAmelCase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests ( unittest.TestCase ):
    '''simple docstring'''
    def test_from_tensor( self ):
        """simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
    def test_from_string( self ):
        """simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=""".wav""" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class AgentImageTests ( unittest.TestCase ):
    '''simple docstring'''
    def test_from_tensor( self ):
        """simple docstring"""
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ):
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ):
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class AgentTextTests ( unittest.TestCase ):
    '''simple docstring'''
    def test_from_string( self ):
        """simple docstring"""
        string = """Hey!"""
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
 | 233 |
def heaps ( arr : list ):
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k : int , arr : list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0: # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else: # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
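# Example (added for illustration): for a 3-element list Heap's algorithm yields all
# 3! = 6 permutations, in this order:
#
#   >>> heaps([1, 2, 3])
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]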
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(heaps(arr)) | 233 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
'''simple docstring'''
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[int]=99 , _SCREAMING_SNAKE_CASE: str=32 , _SCREAMING_SNAKE_CASE: Optional[int]=5 , _SCREAMING_SNAKE_CASE: List[Any]=4 , _SCREAMING_SNAKE_CASE: List[Any]=37 , _SCREAMING_SNAKE_CASE: Optional[Any]="gelu" , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=0.1 , _SCREAMING_SNAKE_CASE: Any=512 , _SCREAMING_SNAKE_CASE: Dict=16 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: List[Any]=3 , _SCREAMING_SNAKE_CASE: str=4 , _SCREAMING_SNAKE_CASE: Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = parent
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : List[Any] = seq_length
__lowerCAmelCase : List[str] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Union[str, Any] = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Any = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCAmelCase : Dict = max_position_embeddings
__lowerCAmelCase : str = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Dict = num_labels
__lowerCAmelCase : List[str] = num_choices
__lowerCAmelCase : List[str] = scope
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
__lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : int = None
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Any:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = NystromformerModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = NystromformerForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = NystromformerForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : List[str] = NystromformerForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.num_labels
__lowerCAmelCase : Tuple = NystromformerForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_choices
__lowerCAmelCase : int = NystromformerForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Dict = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Optional[int] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self: Dict) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCAmelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
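# NOTE (added comment): the tester above follows the standard transformers testing
# contract: `prepare_config_and_inputs_for_common` returns a (config, inputs_dict)
# pair that the shared mixin tests in the class below consume unchanged.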
@require_torch
class NystromformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase : Optional[Any] = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Any = NystromformerModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head(self ):
        """simple docstring"""
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
    @slow
    def test_masked_lm_end_to_end(self ):
        """simple docstring"""
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence , return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction) , "capital") | 58 |
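# Illustrative sketch (added, not part of the test file above): the fill-mask flow from
# the final integration test as a standalone snippet. The checkpoint name comes from the
# test itself; everything else is standard transformers API.
#
#     import torch
#     from transformers import AutoTokenizer, NystromformerForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
#     model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
#     encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
#     with torch.no_grad():
#         logits = model(encoding.input_ids).logits
#     print(tokenizer.decode(logits[:, 2, :].argmax(-1)[0]))  # -> "capital"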
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 58 | 1 |
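# NOTE (added comment): the lists above appear to be precomputed timestep schedules used
# as expected fixtures in diffusers scheduler tests. Every schedule is strictly
# decreasing and ends at timestep 0; assuming one of them is bound to `schedule`, the
# property can be checked with:
#
#     assert all(a > b for a, b in zip(schedule, schedule[1:])) and schedule[-1] == 0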
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig ):
'''simple docstring'''
    model_type = '''ibert'''
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=3_0_5_2_2 , lowerCAmelCase__ : Union[str, Any]=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[Any]=1_2 , lowerCAmelCase__ : Optional[int]=3_0_7_2 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Optional[int]=1E-12 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : int=0 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Union[str, Any]="absolute" , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Any="none" , **lowerCAmelCase__ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = vocab_size
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : str = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = quant_mode
__SCREAMING_SNAKE_CASE : Optional[Any] = force_dequant
class IBertOnnxConfig(OnnxConfig ):
'''simple docstring'''
@property
    def inputs(self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 112 |
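# Illustrative sketch (added): building the config above with its defaults and reading
# the quantization-specific fields. The canonical class name `IBertConfig` is assumed.
#
#     from transformers import IBertConfig
#     config = IBertConfig()                 # quant_mode=False, force_dequant="none"
#     print(config.model_type)               # "ibert"
#     print(config.quant_mode, config.force_dequant)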
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
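# e.g. bytes_to_unicode()[32] == "Ġ": bytes outside the three printable ranges (such as
# the space character, 32) are remapped to chr(256 + n) so every byte gets a visible symbol.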
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
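# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}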
class LEDTokenizer(PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]="replace" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : List[str]="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Union[str, Any]="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : int="<mask>" , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : str = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : int = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : int = {}
__SCREAMING_SNAKE_CASE : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = bigram
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Dict = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Dict = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Tuple = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Tuple = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 112 | 1 |
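# Illustrative sketch (added): LED pads `global_attention_mask` with -1 rather than 0,
# since 0 already means "local attention" for LED. Canonical class name assumed:
#
#     from transformers import LEDTokenizer
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok("a long document ...")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global on <s>
#     padded = tok.pad(enc, padding="max_length", max_length=32)
#     # the positions added by padding carry -1 in padded["global_attention_mask"]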
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=16 , __a=2 , __a=4 , __a=4 , __a="gelu" , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , __a=16 , __a=16 , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = embed_dim
_UpperCamelCase = word_embed_proj_dim
_UpperCamelCase = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , )
_UpperCamelCase = prepare_opt_inputs_dict(__a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = TFOPTModel(config=__a)
_UpperCamelCase = inputs_dict['''input_ids''']
_UpperCamelCase = input_ids[:1, :]
_UpperCamelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCamelCase = 1
# first forward pass
_UpperCamelCase = model(__a , attention_mask=__a , use_cache=__a)
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append to next input_ids and attention_mask
_UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_UpperCamelCase = model(__a , attention_mask=__a)[0]
_UpperCamelCase = model(__a , attention_mask=__a , past_key_values=__a)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = TFOPTModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__a , __a):
if hasattr(__a , '''weight'''):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__a , '''weight'''):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_UpperCamelCase = model_class(config=__a)
_UpperCamelCase = _get_word_embedding_weight(__a , model.get_input_embeddings())
_UpperCamelCase = _get_word_embedding_weight(__a , model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(__a)
_UpperCamelCase = _get_word_embedding_weight(__a , model.get_input_embeddings())
_UpperCamelCase = _get_word_embedding_weight(__a , model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
_UpperCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __a)
# check that weights remain the same after resizing
_UpperCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
_UpperCamelCase = False
self.assertTrue(__a)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __a)
_UpperCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
_UpperCamelCase = False
self.assertTrue(__a)
def _long_tensor(tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32 )
@require_tf
class TFOPTHeadTests(unittest.TestCase ):
    vocab_size = 99
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tf.ones((4, 1) , dtype=tf.intaa) * 2
_UpperCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''')
_UpperCamelCase = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCamelCase = tf.not_equal(__a , model.config.pad_token_id)
with tf.GradientTape():
_UpperCamelCase = model(input_ids=__a , attention_mask=__a).last_hidden_state
_UpperCamelCase = (1, 11, 5_12)
self.assertEqual(output.shape , __a)
_UpperCamelCase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-3))
_UpperCamelCase = tf.function(__a , jit_compile=__a)
_UpperCamelCase = xla_generate(__a , __a)[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase ):
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
super().setUp()
_UpperCamelCase = '''facebook/opt-350m'''
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model)
_UpperCamelCase = GPTaTokenizer.from_pretrained(self.path_model)
_UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_UpperCamelCase = tokenizer(__a , return_tensors='''tf''' , padding=__a , add_special_tokens=__a)
_UpperCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
_UpperCamelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
])
self.assertTrue(np.allclose(__a , __a , atol=1e-4))
_UpperCamelCase = tf.function(__a , jit_compile=__a)
_UpperCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
self.assertTrue(np.allclose(__a , __a , atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase ):
@property
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = '''facebook/opt-125m'''
_UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_UpperCamelCase = []
_UpperCamelCase = GPTaTokenizer.from_pretrained(__a)
_UpperCamelCase = TFOPTForCausalLM.from_pretrained(__a)
for prompt in self.prompts:
_UpperCamelCase = tokenizer(__a , return_tensors='''tf''').input_ids
_UpperCamelCase = model.generate(__a , max_length=10)
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
predicted_outputs += generated_string
self.assertListEqual(__a , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = '''facebook/opt-350m'''
_UpperCamelCase = GPTaTokenizer.from_pretrained(__a)
_UpperCamelCase = TFOPTForCausalLM.from_pretrained(__a)
_UpperCamelCase = '''left'''
# use different length sentences to test batching
_UpperCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''tf''' , padding=__a)
_UpperCamelCase = inputs['''input_ids''']
_UpperCamelCase = model.generate(input_ids=__a , attention_mask=inputs['''attention_mask'''])
_UpperCamelCase = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
_UpperCamelCase = model.generate(input_ids=__a)
_UpperCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa))
_UpperCamelCase = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
_UpperCamelCase = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings)
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__a , __a)
self.assertListEqual(__a , [non_padded_sentence, padded_sentence])
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = '''facebook/opt-350m'''
_UpperCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_UpperCamelCase = []
_UpperCamelCase = GPTaTokenizer.from_pretrained(__a)
_UpperCamelCase = TFOPTForCausalLM.from_pretrained(__a)
for prompt in self.prompts:
_UpperCamelCase = tokenizer(__a , return_tensors='''tf''').input_ids
_UpperCamelCase = model.generate(__a , max_length=10)
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
predicted_outputs += generated_string
self.assertListEqual(__a , __a)
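# Illustrative sketch (added): the left-padded batched generation pattern exercised by
# the batching test above, reduced to its essentials. `GPT2Tokenizer` is the canonical
# name for the tokenizer class this sample imports under a mangled alias.
#
#     from transformers import GPT2Tokenizer, TFOPTForCausalLM
#     tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
#     tokenizer.padding_side = "left"        # decoder-only models must pad on the left
#     model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
#     inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
#     out = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
#     print(tokenizer.batch_decode(out, skip_special_tokens=True))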
| 100 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
    def num_attention_heads(self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
    def from_backbone_config(cls , backbone_config , **kwargs):
        '''simple docstring'''
        return cls(backbone_config=backbone_config , **kwargs)
    def to_dict(self) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset(self) -> int:
'''simple docstring'''
return 12
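# NOTE (added comment): `to_dict` above inlines the nested backbone config, so a round
# trip preserves a custom backbone. A minimal sketch, assuming the canonical class name:
#
#     cfg = DetrConfig(use_timm_backbone=False)
#     restored = DetrConfig(**cfg.to_dict())
#     assert restored.backbone_config.model_type == "resnet"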
| 100 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
"""simple docstring"""
    def __init__(self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        """simple docstring"""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__(self , other ):
        """simple docstring"""
        if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self , text , overwrite = False ):
        """simple docstring"""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text
    def mark_processed(self ):
        """simple docstring"""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response(self , response ):
        """simple docstring"""
        self.generated_responses.append(response )
    def iter_texts(self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__(self ):
        """simple docstring"""
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F'''{name} >> {text} \n'''
        return output
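# Example (added comment): a Conversation alternates user turns and bot responses.
#
#     conv = Conversation("Hi there!")
#     conv.mark_processed()
#     conv.append_response("Hello! How can I help?")
#     list(conv.iter_texts())  # [(True, "Hi there!"), (False, "Hello! How can I help?")]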
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n    ", )
class ConversationalPipeline(Pipeline ):
"""simple docstring"""
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        """simple docstring"""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__(self , conversations , num_workers=0 , **kwargs ):
        """simple docstring"""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess(self , conversation , min_length_for_response=32 ):
        """simple docstring"""
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline expects a Conversation as input" )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        """simple docstring"""
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self , model_outputs , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize(self , conversation ):
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
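# Illustrative usage sketch (added): "microsoft/DialoGPT-small" is just one example of a
# conversational checkpoint and is not referenced by the file above.
#
#     from transformers import pipeline, Conversation
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("What's the best way to learn Python?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])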
| 227 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self , **kwargs ):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
def UpperCamelCase_ (self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , )
def UpperCamelCase_ (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
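        # fixed_small variance is the (clamped) DDPM posterior:
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)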
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
a = len(lowerCamelCase_ )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
a = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
a = pred_prev_sample
a = torch.sum(torch.abs(lowerCamelCase_ ) )
a = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config(prediction_type="v_prediction" )
a = scheduler_class(**lowerCamelCase_ )
a = len(lowerCamelCase_ )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
a = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
a = pred_prev_sample
a = torch.sum(torch.abs(lowerCamelCase_ ) )
a = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
a = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
a = -1
else:
a = timesteps[i + 1]
a = scheduler.previous_timestep(lowerCamelCase_ )
a = prev_t.item()
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
a = [100, 87, 50, 51, 0]
with self.assertRaises(lowerCamelCase_ , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
a = [100, 87, 50, 1, 0]
a = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**lowerCamelCase_ )
a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
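# Illustrative sketch (added): the inference-time counterpart of the full-loop tests,
# assuming `model` is a noise-("epsilon")-predicting UNet that returns the predicted
# noise tensor directly.
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = model(sample, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample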
| 227 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encodec'''] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 196 |
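# NOTE (added comment): with the `_LazyModule` registration above, an import such as
# `from transformers.models.encodec import EncodecModel` resolves lazily on first
# attribute access; if torch is unavailable, the model symbols are simply never added
# to `_import_structure`.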
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase ):
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : List[Any]=56 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : List[str]=99 , SCREAMING_SNAKE_CASE : str=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Dict=7 , SCREAMING_SNAKE_CASE : List[Any]="gelu_new" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Any=5_12 , SCREAMING_SNAKE_CASE : Dict=16 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Any=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : int="block_sparse" , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Dict=3 , ):
'''simple docstring'''
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : Union[str, Any] = seq_length
UpperCamelCase__ : Dict = is_training
UpperCamelCase__ : Optional[int] = use_attention_mask
UpperCamelCase__ : List[str] = use_token_type_ids
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : List[Any] = hidden_size
UpperCamelCase__ : List[str] = num_hidden_layers
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = intermediate_size
UpperCamelCase__ : str = hidden_act
UpperCamelCase__ : Tuple = hidden_dropout_prob
UpperCamelCase__ : Any = attention_probs_dropout_prob
UpperCamelCase__ : str = max_position_embeddings
UpperCamelCase__ : Tuple = type_vocab_size
UpperCamelCase__ : Dict = type_sequence_label_size
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Any = num_choices
UpperCamelCase__ : Dict = rescale_embeddings
UpperCamelCase__ : Union[str, Any] = attention_type
UpperCamelCase__ : int = use_bias
UpperCamelCase__ : List[Any] = block_size
UpperCamelCase__ : Union[str, Any] = num_random_blocks
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Any = None
if self.use_attention_mask:
UpperCamelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Any = None
if self.use_token_type_ids:
UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : List[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = config_and_inputs
UpperCamelCase__ : int = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class __a ( A__ , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = False
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Any = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def __lowercase ( self : str ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCamelCase__ : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=None , **SCREAMING_SNAKE_CASE : List[Any] ):
return model(input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
with self.subTest("JIT Enabled" ):
UpperCamelCase__ : Tuple = model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase__ : List[Any] = model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any=1e-5 , SCREAMING_SNAKE_CASE : Tuple="outputs" , SCREAMING_SNAKE_CASE : Optional[Any]=None ):
'''simple docstring'''
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) | 196 | 1 |
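# --- Illustration (not part of the original test file) -----------------------
# A minimal, self-contained sketch of the JIT-equivalence pattern used in
# test_jit_compilation above: run the same function with and without jax.jit
# and compare the output shapes. Requires only jax.
import jax
import jax.numpy as jnp

@jax.jit
def _double(x):
    return x * 2

_x = jnp.ones((2, 3))
_jitted = _double(_x)
with jax.disable_jit():
    _eager = _double(_x)
assert _jitted.shape == _eager.shape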
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
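# --- Illustration (not part of the original tests) ---------------------------
# Hedged usage sketch of the inspection helpers exercised above; it needs
# network access to the Hugging Face Hub, so it is left commented out. The
# dataset names mirror the parametrized fixtures.
#
# configs = get_dataset_config_names("squad")              # -> ["plain_text"]
# splits = get_dataset_split_names("squad", "plain_text")  # -> ["train", "validation"]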
| 70 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
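# --- Illustration (not part of the original builder) -------------------------
# Hedged sketch of driving this packaged builder through `datasets`; the file
# name is hypothetical and `load_dataset("pandas", ...)` is the assumed entry
# point for packaged modules. Left commented out (requires a writable cwd).
#
# import pandas as pd
# pd.DataFrame({"a": [1, 2]}).to_pickle("data.pkl")
# ds = datasets.load_dataset("pandas", data_files="data.pkl", split="train")
# assert ds["a"] == [1, 2]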
| 70 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model, inputs = self.get_pretrained_model_and_inputs()

        outputs = model(**inputs)
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) | 292 |
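# --- Illustration (not part of the original tests) ---------------------------
# Hedged sketch of the PT <-> Flax round trip verified in
# check_pt_flax_equivalence above: save in one framework, reload in the other.
# Requires both torch and flax, so it is left commented out; "ckpt" paths are
# hypothetical.
#
# pt_model.save_pretrained("ckpt")
# fx_model = FlaxVisionTextDualEncoderModel.from_pretrained("ckpt", from_pt=True)
# fx_model.save_pretrained("ckpt_flax")
# pt_model = VisionTextDualEncoderModel.from_pretrained("ckpt_flax", from_flax=True)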
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # the three common tests below are skipped: they are not applicable to the
    # rjieba-based pre-tokenizer
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass | 292 | 1 |
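# --- Illustration (not part of the original tests) ---------------------------
# Hedged usage sketch mirroring the fixture strings above; it downloads the
# checkpoint from the Hub, so it is left commented out.
#
# tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
# tok.tokenize("永和服装饰品有限公司,今天天气非常好")
# # -> ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']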
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other) -> bool:
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
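# --- Illustration (not part of the original module) --------------------------
# Worked example on a 3-vertex triangle (edge weights 1, 2, 4): both Prim
# variants pick the two cheapest edges, so the (1-based) MST edge list is
# [(2, 1), (3, 2)].
if __name__ == "__main__":
    _g = [Vertex(n) for n in range(3)]
    connect(_g, 1, 2, 1)
    connect(_g, 2, 3, 2)
    connect(_g, 1, 3, 4)
    assert prim(_g, _g[0]) == [(2, 1), (3, 2)]
    assert list(prim_heap(_g, _g[0])) == [(2, 1), (3, 2)]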
| 143 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
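# --- Illustration (not part of the original file) -----------------------------
# Worked example of the create_token_type_ids_from_sequences arithmetic for a
# sequence pair: [CLS] A1 A2 [SEP] gets segment 0 and B1 [SEP] gets segment 1.
# Pure list arithmetic, so no tokenizer instance is needed; the ids are
# hypothetical.
_ids_a, _ids_b = [11, 12], [21]
_cls, _sep = [101], [102]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]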
| 143 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # returns True only once every sequence in the batch contains an end-of-function string
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens_batch in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens_batch)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
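# --- Illustration (not part of the original script) ---------------------------
# Hedged sketch of what the code_eval metric consumes (mirroring the compute
# call in main above): one reference test string per task and a list of
# candidate completions per task. Left commented out because code execution
# must be explicitly enabled via HF_ALLOW_CODE_EVAL.
#
# pass_at_k, _ = load_metric("code_eval").compute(
#     references=["assert add(1, 2) == 3"],
#     predictions=[["def add(a, b):\n    return a + b"]],
# )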
| 237 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
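# --- Illustration (not part of the original tests) ---------------------------
# Hedged sketch of the retriever call shape validated above: given retrieved
# block ids plus tokenized question and answer ids, it returns per-block
# answer spans and the concatenated reader inputs. Variable names are
# illustrative; left commented out.
#
# has_answers, start_pos, end_pos, concat_inputs = retriever(
#     np.array([0, 3], dtype="long"), question_ids, answer_ids=answer_ids,
#     max_length=config.reader_seq_len, return_tensors="np",
# )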
| 237 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 17 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
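# --- Illustration (not part of the original file) -----------------------------
# Hedged sketch of composing the three sub-configs above; all defaults come
# from the classes in this module, and "opt" is assumed to be registered in
# CONFIG_MAPPING (it ships with transformers). Left commented out.
#
# config = InstructBlipConfig.from_vision_qformer_text_configs(
#     InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]()
# )
# assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size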
| 62 | 0 |
from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
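# --- Illustration (not part of the original file) -----------------------------
# Minimal smoke test of the head: a batch of 4 hidden states of size 8 is
# mapped to 3 class logits.
if __name__ == "__main__":
    import torch

    _head = ClassificationHead(class_size=3, embed_size=8)
    assert _head(torch.randn(4, 8)).shape == (4, 3)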
| 353 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute(
        self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )

        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 238 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    # A maximizer's children are minimizers and vice versa, so is_max flips each level.
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
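# Hedged worked example: for scores [3, 5, 2, 9] (height = 2), the maximizing root
# sees min(3, 5) = 3 on the left and min(2, 9) = 2 on the right, so:
#
#     assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3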
| 60 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloat16 , )

        prompt = '''A painting of a squirrel eating a burger'''
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )

        params = replicate(params )
        prompt_ids = shard(prompt_ids )

        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )

        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = '''stabilityai/stable-diffusion-2'''
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='''bf16''' , dtype=jnp.bfloat16 , )
        params['''scheduler'''] = scheduler_params

        prompt = '''A painting of a squirrel eating a burger'''
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )

        params = replicate(params )
        prompt_ids = shard(prompt_ids )

        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )

        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 292 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )

    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )

    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.' )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.' )
        else:
            print('Launching training on one CPU.' )
        function(*args )
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.' )
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.' )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.01' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )

                print(F'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.' ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print('Launching training on MPS.' )
            elif torch.cuda.is_available():
                print('Launching training on one GPU.' )
            else:
                print('Launching training on CPU.' )
            function(*args )
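# A hedged usage sketch from a notebook cell (`training_loop` is a placeholder name,
# not part of the original file):
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build the Accelerator, model and dataloaders inside this function
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)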
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' ) | 292 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )

    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 95 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''mra'''

    def __init__(
        self ,
        vocab_size=50265 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=1 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        position_embedding_type="absolute" ,
        block_per_row=4 ,
        approx_mode="full" ,
        initial_prior_first_n_blocks=0 ,
        initial_prior_diagonal_n_blocks=0 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
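# A hedged usage sketch: build a default configuration and read fields back.
#
#     config = MraConfig()
#     config.model_type     # -> 'mra'
#     config.block_per_row  # -> 4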
| 303 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 366 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data (min-max scaling into [0, 1])
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data (z-score with sample standard deviation)
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
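# A deterministic check (illustrative, not from the original file): for data = [2, 4, 6],
#
#     normalization([2, 4, 6])    # -> [0.0, 0.5, 1.0]
#     standardization([2, 4, 6])  # -> [-1.0, 0.0, 1.0]  (mean 4, sample stdev 2)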
| 221 | 0 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion point of val in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the larger elements one slot to the right, then insert
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
__UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
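# Hedged example: the sort is in-place and also returns the list, e.g.
#
#     binary_insertion_sort([5, 2, 4, 1])  # -> [1, 2, 4, 5]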
| 69 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
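# A hedged example of how the custom flag is used inside a doctest (illustrative only):
#
#     >>> random.random()  # doctest: +IGNORE_RESULT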
| 319 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums


def validate(n: int) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
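# For reference (hedged): the eleven truncatable primes are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, which sum to 748317
# (Project Euler problem 37).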
| 366 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool(text='hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = 'RegNetConfig'
# Base docstring
UpperCAmelCase_ = 'facebook/regnet-y-040'
UpperCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
UpperCAmelCase_ = 'facebook/regnet-y-040'
UpperCAmelCase_ = 'tabby, tabby cat'
UpperCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[str] = "relu" , **UpperCamelCase_: List[str] , ):
super().__init__(**UpperCamelCase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowerCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__lowerCamelCase = tf.keras.layers.ConvaD(
filters=UpperCamelCase_ , kernel_size=UpperCamelCase_ , strides=UpperCamelCase_ , padding="""VALID""" , groups=UpperCamelCase_ , use_bias=UpperCamelCase_ , name="""convolution""" , )
__lowerCamelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
__lowerCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.convolution(self.padding(UpperCamelCase_ ) )
__lowerCamelCase = self.normalization(UpperCamelCase_ )
__lowerCamelCase = self.activation(UpperCamelCase_ )
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: Union[str, Any] , UpperCamelCase_: RegNetConfig , **UpperCamelCase_: Tuple ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = config.num_channels
__lowerCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Any ):
__lowerCamelCase = shape_list(UpperCamelCase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCamelCase = tf.transpose(UpperCamelCase_ , perm=(0, 2, 3, 1) )
__lowerCamelCase = self.embedder(UpperCamelCase_ )
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int = 2 , **UpperCamelCase_: List[Any] ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = tf.keras.layers.ConvaD(
filters=UpperCamelCase_ , kernel_size=1 , strides=UpperCamelCase_ , use_bias=UpperCamelCase_ , name="""convolution""" )
__lowerCamelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: tf.Tensor , UpperCamelCase_: bool = False ):
return self.normalization(self.convolution(UpperCamelCase_ ) , training=UpperCamelCase_ )
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: str , UpperCamelCase_: int , UpperCamelCase_: int , **UpperCamelCase_: str ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name="""pooler""" )
__lowerCamelCase = [
tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowerCamelCase = self.pooler(UpperCamelCase_ )
for layer_module in self.attention:
__lowerCamelCase = layer_module(UpperCamelCase_ )
__lowerCamelCase = hidden_state * pooled
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: Union[str, Any] , UpperCamelCase_: RegNetConfig , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 1 , **UpperCamelCase_: Any ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = in_channels != out_channels or stride != 1
__lowerCamelCase = max(1 , out_channels // config.groups_width )
__lowerCamelCase = (
TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCamelCase = [
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name="""layer.2""" ),
]
__lowerCamelCase = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = hidden_state
for layer_module in self.layers:
__lowerCamelCase = layer_module(UpperCamelCase_ )
__lowerCamelCase = self.shortcut(UpperCamelCase_ )
hidden_state += residual
__lowerCamelCase = self.activation(UpperCamelCase_ )
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: Any , UpperCamelCase_: RegNetConfig , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 1 , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = in_channels != out_channels or stride != 1
__lowerCamelCase = max(1 , out_channels // config.groups_width )
__lowerCamelCase = (
TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__lowerCamelCase = [
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(UpperCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name="""layer.3""" ),
]
__lowerCamelCase = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str ):
__lowerCamelCase = hidden_state
for layer_module in self.layers:
__lowerCamelCase = layer_module(UpperCamelCase_ )
__lowerCamelCase = self.shortcut(UpperCamelCase_ )
hidden_state += residual
__lowerCamelCase = self.activation(UpperCamelCase_ )
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: List[str] , UpperCamelCase_: RegNetConfig , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , **UpperCamelCase_: Tuple ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__lowerCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ , name="""layers.0""" ),
*[layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
for layer_module in self.layers:
__lowerCamelCase = layer_module(UpperCamelCase_ )
return hidden_state
class lowerCamelCase__( tf.keras.layers.Layer):
def __init__( self: List[Any] , UpperCamelCase_: RegNetConfig , **UpperCamelCase_: Dict ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__lowerCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCamelCase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , depth=UpperCamelCase_ , name=F'stages.{i+1}' ) )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: tf.Tensor , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True ):
__lowerCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCamelCase = hidden_states + (hidden_state,)
__lowerCamelCase = stage_module(UpperCamelCase_ )
if output_hidden_states:
__lowerCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase_ , hidden_states=UpperCamelCase_ )
@keras_serializable
class lowerCamelCase__( tf.keras.layers.Layer):
UpperCAmelCase__ : Union[str, Any] = RegNetConfig
def __init__( self: str , UpperCamelCase_: str , **UpperCamelCase_: List[str] ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = config
__lowerCamelCase = TFRegNetEmbeddings(UpperCamelCase_ , name="""embedder""" )
__lowerCamelCase = TFRegNetEncoder(UpperCamelCase_ , name="""encoder""" )
__lowerCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name="""pooler""" )
@unpack_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: tf.Tensor , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: bool = False , ):
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = self.embedder(UpperCamelCase_ , training=UpperCamelCase_ )
__lowerCamelCase = self.encoder(
UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ )
__lowerCamelCase = encoder_outputs[0]
__lowerCamelCase = self.pooler(UpperCamelCase_ )
# Change to NCHW output format have uniformity in the modules
__lowerCamelCase = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) )
__lowerCamelCase = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCamelCase = tuple([tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = RegNetConfig
UpperCAmelCase__ : str = 'regnet'
UpperCAmelCase__ : Union[str, Any] = 'pixel_values'
@property
def lowerCAmelCase__ ( self: Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
UpperCAmelCase_ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCAmelCase_ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCamelCase , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Optional[Any] , UpperCamelCase_: RegNetConfig , *UpperCamelCase_: List[str] , **UpperCamelCase_: Any ):
super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = TFRegNetMainLayer(UpperCamelCase_ , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: tf.Tensor , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: int=False , ):
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = self.regnet(
pixel_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowerCamelCase , )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
def __init__( self: Optional[int] , UpperCamelCase_: RegNetConfig , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Dict ):
super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = config.num_labels
__lowerCamelCase = TFRegNetMainLayer(UpperCamelCase_ , name="""regnet""" )
# classification head
__lowerCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: tf.Tensor = None , UpperCamelCase_: tf.Tensor = None , UpperCamelCase_: bool = None , UpperCamelCase_: bool = None , UpperCamelCase_: List[str]=False , ):
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = self.regnet(
UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ )
__lowerCamelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCamelCase = self.classifier[0](UpperCamelCase_ )
__lowerCamelCase = self.classifier[1](UpperCamelCase_ )
__lowerCamelCase = None if labels is None else self.hf_compute_loss(labels=UpperCamelCase_ , logits=UpperCamelCase_ )
if not return_dict:
__lowerCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states )
| 12 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 12 | 1 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter image url: ').strip()
print(f'''Downloading image from {url} ...''')
lowerCAmelCase_ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase_ = soup.find('meta', {'property': 'og:image'})['content']
lowerCAmelCase_ = requests.get(image_url).content
lowerCAmelCase_ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''') | 351 |
import os
from collections.abc import Iterator
def snake_case( __magic_name__ = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__magic_name__ ):
lowercase : Tuple = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__magic_name__ )[1] in (".py", ".ipynb"):
yield os.path.join(__magic_name__ , __magic_name__ ).lstrip('''./''' )
def snake_case( __magic_name__ ) -> Dict:
'''simple docstring'''
return F"""{i * ' '}*""" if i else "\n##"
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Dict = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__magic_name__ ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__magic_name__ )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def snake_case( __magic_name__ = "." ) -> None:
'''simple docstring'''
lowercase : str = ''''''
for filepath in sorted(good_file_paths(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = os.path.split(__magic_name__ )
if filepath != old_path:
lowercase : str = print_path(__magic_name__ , __magic_name__ )
lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase : Optional[Any] = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
lowercase : List[str] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"""{md_prefix(__magic_name__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.') | 116 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase__ ( unittest.TestCase):
def __init__( self :Optional[Any] , _A :Union[str, Any] , _A :Optional[Any]=7 , _A :Any=3 , _A :Optional[Any]=18 , _A :Dict=30 , _A :Tuple=400 , _A :Union[str, Any]=True , _A :Any=None , _A :Dict=True , ) -> Any:
'''simple docstring'''
__A = size if size is not None else {'height': 18, 'width': 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = apply_ocr
def lowercase_ ( self :int ) -> List[Any]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowercase_ ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = LayoutLMvaImageProcessingTester(self )
@property
def lowercase_ ( self :int ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'apply_ocr' ) )
def lowercase_ ( self :int ) -> Union[str, Any]:
'''simple docstring'''
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( self :List[Any] ) -> int:
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _A )
self.assertIsInstance(encoding.boxes , _A )
# Test batched
__A = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowercase_ ( self :int ) -> List[str]:
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowercase_ ( self :str ) -> Union[str, Any]:
'''simple docstring'''
__A = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__A = Image.open(ds[0]['file'] ).convert('RGB' )
__A = image_processing(_A , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _A )
self.assertListEqual(encoding.boxes , _A )
# with apply_OCR = False
__A = LayoutLMvaImageProcessor(apply_ocr=_A )
__A = image_processing(_A , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 161 |
"""Forecast the next day's user count with three models and vote on whether today's data looks safe."""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares on (date, match count) features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
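# beta solves the normal equations (X^T X)^-1 X^T y for the plane fitted to
# (date, match) -> user; e.g.
#   linear_regression_prediction([2, 3, 4, 5], [5, 3, 4, 6], [3, 1, 2, 4], [2, 1], [2, 2])
# returns 5.0 for these inputs (beta = [2, 0, 1]).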
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX, a statistical model that learns the series' pattern."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
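# In order=(p, d, q), p counts autoregressive lags, d the differencing steps and
# q the moving-average lags; seasonal_order=(1, 1, 0, 7) adds a weekly (period-7)
# seasonal component, with the match counts supplied as an exogenous regressor.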
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
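# Worked example: for [1, 2, 3, 4, 5], q1 = 2.0 and q3 = 4.0, so iqr = 2.0 and
# the lower limit is 2.0 - 0.2 = 1.8; anything below that would look suspicious.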
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast counts as safe if it is within 0.1 of the actual value."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
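# e.g. data_safety_checker([5.0, 4.95, 7.0], 5.0) -> True: two of the three
# forecasts sit within 0.1 of the actual value, so the majority votes "safe".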
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 161 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
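# XLNet encodes inputs as two segments (A/B); the token_type_ids built further
# below end with SEG_ID_CLS (= 2) for the trailing <cls> token.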
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=False , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<sep>" , lowercase="<pad>" , lowercase="<cls>" , lowercase="<mask>" , lowercase=["<eop>", "<eod>"] , lowercase = None , **lowercase , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowerCamelCase_ = 3
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
if self.remove_space:
lowerCamelCase_ = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ = inputs
lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
lowerCamelCase_ = unicodedata.normalize("NFKD" , _a )
lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
lowerCamelCase_ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = self.preprocess_text(_a )
lowerCamelCase_ = self.sp_model.encode(_a , out_type=_a )
lowerCamelCase_ = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ = cur_pieces[1:]
else:
lowerCamelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = "".join(_a ).replace(_a , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False , lowercase = None , lowercase = True , **lowercase , ) -> List[str]:
lowerCamelCase_ = kwargs.pop("use_source_tokenizer" , _a )
lowerCamelCase_ = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ = []
lowerCamelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
lowerCamelCase_ = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ = "".join(_a )
lowerCamelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[str]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
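        # XLNet places its special tokens at the end, so a single sequence becomes
        #   A + [<sep>] + [<cls>]
        # and a pair becomes
        #   A + [<sep>] + B + [<sep>] + [<cls>]
        # (unlike BERT, where the classification token leads the sequence).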
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[str]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[str]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
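        # Example for a pair (A, B): the resulting segment ids are
        #   len(A + [<sep>]) * [0] + len(B + [<sep>]) * [1] + [2]
        # where 2 marks the dedicated <cls> segment.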
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple:
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 363 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
if training_args.do_train:
if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
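    # e.g. logits [[0.1, 2.3, -1.0]] -> argmax over axis 1 -> predicted class 1;
    # the XNLI metric then scores accuracy against the reference label ids.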
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 47 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCAmelCase_ ( self :Dict ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ = XLNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self :Optional[int] ) -> List[Any]:
UpperCAmelCase__ = '<s>'
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase_ ( self :Any ) -> Any:
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(__UpperCamelCase ) , 1006 )
def UpperCAmelCase_ ( self :Optional[int] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self :str ) -> Optional[int]:
UpperCAmelCase__ = XLNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
UpperCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCAmelCase_ ( self :List[str] ) -> Optional[Any]:
UpperCAmelCase__ = XLNetTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def UpperCAmelCase_ ( self :int ) -> List[Any]:
UpperCAmelCase__ = XLNetTokenizer(__UpperCamelCase , do_lower_case=__UpperCamelCase )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
UpperCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCamelCase )
UpperCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCamelCase )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 169 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
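        # e.g. with the defaults above (image_size=30, patch_size=2) this gives
        # (30 // 2) ** 2 = 225 patches, so self.seq_length == 226.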
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : Any = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[Any] = ViTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ViTForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ : Any = 1
lowercase_ : List[Any] = ViTForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Dict = self.type_sequence_label_size
lowercase_ : Dict = ViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Dict = 1
lowercase_ : Any = ViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase ,nn.Linear ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(__UpperCamelCase )
lowercase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Tuple = [*signature.parameters.keys()]
lowercase_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = ViTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : str = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__UpperCamelCase )
lowercase_ : Any = self.default_image_processor
lowercase_ : Dict = prepare_img()
lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**__UpperCamelCase )
# verify the logits
lowercase_ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : Optional[int] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : str = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__UpperCamelCase )
lowercase_ : int = ViTImageProcessor.from_pretrained('facebook/dino-vits8' ,size=480 )
lowercase_ : int = prepare_img()
lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
lowercase_ : int = inputs.pixel_values.to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ : int = model(__UpperCamelCase ,interpolate_pos_encoding=__UpperCamelCase )
# verify the logits
lowercase_ : Any = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,__UpperCamelCase )
lowercase_ : Any = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
        lowercase_ : Optional[int] = ViTModel.from_pretrained('facebook/dino-vits8' ,torch_dtype=torch.float16 ,device_map='auto' )
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
lowercase_ : Union[str, Any] = inputs.pixel_values.to(__UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )
| 213 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'''height''': 18, '''width''': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase__( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = DonutImageProcessor if is_vision_available() else None
def UpperCamelCase__( self ):
'''simple docstring'''
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__A : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
@is_flaky()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__A : List[Any] = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__A : Optional[int] = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__A : Dict = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 291 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
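    # With the lazy module installed in sys.modules, names listed in
    # _import_structure (e.g. VivitConfig) only trigger the real submodule import
    # on first attribute access, which keeps `import transformers` cheap.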
| 291 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
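        # Usage sketch: MgpstrConfig() with the defaults above is assumed to match
        # the "alibaba-damo/mgp-str-base" checkpoint; individual fields can be
        # overridden, e.g. MgpstrConfig(max_token_length=27, output_a3_attentions=True).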
| 92 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ):
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCAmelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCAmelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving model to {ckpt_dir}""" )
__lowerCAmelCase = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading model from {input_model_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCAmelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading model from {input_model_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCAmelCase = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
__lowerCAmelCase = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
__lowerCAmelCase = state_dict["model"]
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ):
    os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=True)
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCAmelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
            os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=True)
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCAmelCase = None
            # the check below should work, but it currently fails (mostly a PyTorch issue);
            # in the meantime it is disabled, at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCAmelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__lowerCAmelCase = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
__lowerCAmelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , )
__lowerCAmelCase = optim_state["optimizer"]
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
__lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
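
# Minimal usage sketch for the four helpers above. Upstream, in
# `accelerate.utils.fsdp_utils`, they are named save_fsdp_model, load_fsdp_model,
# save_fsdp_optimizer and load_fsdp_optimizer (here every definition is mangled to
# `_a`). The checkpoint directory below is illustrative:
#
#   from accelerate import Accelerator
#   from accelerate.utils import save_fsdp_model, save_fsdp_optimizer
#
#   accelerator = Accelerator()  # launched with an FSDP config
#   model, optimizer = accelerator.prepare(model, optimizer)
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)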
| 92 | 1 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
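
# Hypothetical invocation of this Fire CLI (file and path names are illustrative):
#
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json
#
# or directly from Python:
#
#   metrics = calculate_rouge_path("predictions.txt", "references.txt")
#   print(metrics)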
| 241 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, _UpperCAmelCase):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
# We keep only the field we are interested in
UpperCAmelCase_ = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(_UpperCAmelCase , (list, tuple) ):
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ = dataset
UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase )
yield file_idx, self._cast_table(_UpperCAmelCase )
# If the file has one json object per line
else:
with open(_UpperCAmelCase , "rb" ) as f:
UpperCAmelCase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
UpperCAmelCase_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ = batch.decode(self.config.encoding , errors=_UpperCAmelCase ).encode("utf-8" )
try:
while True:
try:
UpperCAmelCase_ = paj.read_json(
io.BytesIO(_UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=_UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_UpperCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(_UpperCAmelCase )
or block_size > len(_UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_UpperCAmelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_UpperCAmelCase , _UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_UpperCAmelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contains the following fields: {str(list(dataset.keys()))}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_UpperCAmelCase )
batch_idx += 1
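
# Minimal usage sketch: this builder is what backs `load_dataset("json", ...)` in the
# datasets library. File names below are illustrative:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("json", data_files="train.jsonl", split="train")
#   # for a single JSON document whose records live under one key:
#   ds = load_dataset("json", data_files="data.json", field="data", split="train")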
| 241 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Constructs an OWL-ViT processor wrapping an image processor and a CLIP tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = kwargs.pop("feature_extractor" )
__SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]="max_length" , UpperCAmelCase__ : Optional[Any]="np" , **UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(text[0] , UpperCAmelCase__ )):
__SCREAMING_SNAKE_CASE = [self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )]
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(text[0] , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = []
# Maximum number of queries across batch
__SCREAMING_SNAKE_CASE = max([len(UpperCAmelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase__ ) != max_num_queries:
__SCREAMING_SNAKE_CASE = t + [" "] * (max_num_queries - len(UpperCAmelCase__ ))
__SCREAMING_SNAKE_CASE = self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
encodings.append(UpperCAmelCase__ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__SCREAMING_SNAKE_CASE = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__SCREAMING_SNAKE_CASE = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__SCREAMING_SNAKE_CASE = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__SCREAMING_SNAKE_CASE = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__SCREAMING_SNAKE_CASE = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__SCREAMING_SNAKE_CASE = BatchEncoding()
__SCREAMING_SNAKE_CASE = input_ids
__SCREAMING_SNAKE_CASE = attention_mask
if query_images is not None:
__SCREAMING_SNAKE_CASE = BatchEncoding()
__SCREAMING_SNAKE_CASE = self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ).pixel_values
__SCREAMING_SNAKE_CASE = query_pixel_values
if images is not None:
__SCREAMING_SNAKE_CASE = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and images is not None:
__SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) -> List[str]:
return self.image_processor.post_process(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) -> Tuple:
return self.image_processor.post_process_object_detection(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> List[Any]:
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[int] ) -> Tuple:
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def UpperCAmelCase_ ( self : Any ) -> str:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase__ , )
return self.image_processor_class
@property
def UpperCAmelCase_ ( self : List[str] ) -> str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase__ , )
return self.image_processor
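
# Minimal usage sketch (the checkpoint name is a real OWL-ViT checkpoint, but the
# image URL and the text queries are illustrative):
#
#   import requests
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   print(inputs.keys())  # input_ids, attention_mask, pixel_values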
| 54 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[Any]=9_9 , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[Any]=2_0 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Optional[Any]=0 , ) -> Any:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = bos_token_id
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__SCREAMING_SNAKE_CASE = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE = np.concatenate([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE = 2_0
__SCREAMING_SNAKE_CASE = model_class_name(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = 2_0
__SCREAMING_SNAKE_CASE = model_class_name(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
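
# Tiny numeric sketch of the helper above (the values are illustrative):
#
#   config = PegasusConfig(vocab_size=99, pad_token_id=0)
#   input_ids = np.array([[5, 6, 2, 0]])           # trailing 0 is padding
#   decoder_input_ids = np.array([[1, 7, 0, 0]])
#   batch = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
#   # batch["attention_mask"]         -> [[1, 1, 1, 0]]
#   # batch["decoder_attention_mask"] -> [[1, 1, 0, 0]]  (position 0 is always kept)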
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : int ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__SCREAMING_SNAKE_CASE = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 54 | 1 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _lowerCamelCase ( lowercase : Tuple=32 , lowercase : Tuple=10 , lowercase : Dict=100 , lowercase : Tuple=1026 , lowercase : Union[str, Any]=True , lowercase : Optional[int]="data/tokenized_stories_train_wikitext103.jbl" , lowercase : Tuple="igf_context_pairs.jbl" , ) -> int:
set_seed(3 )
# generate train_data and objective_set
_a , _a = generate_datasets(
lowercase , lowercase , number=lowercase , min_len=1026 , trim=lowercase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_a = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
_a = load_gpta("gpt2" ).to(lowercase )
print("computing perplexity on objective set" )
_a = compute_perplexity(lowercase , lowercase , lowercase ).item()
print("perplexity on objective set:" , lowercase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Union[str, Any]=15 , lowercase : int=128 , lowercase : Union[str, Any]=100 , lowercase : Any="igf_model.pt" , ) -> Optional[Any]:
set_seed(42 )
# Load pre-trained model
_a = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
_a = SecondaryLearner(lowercase )
# Train secondary learner
_a = train_secondary_learner(
lowercase , lowercase , max_epochs=lowercase , batch_size=lowercase , eval_freq=100 , igf_model_path=lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Tuple=32 , lowercase : List[Any]=1000 , lowercase : List[Any]=16 , lowercase : Any=1.0 , lowercase : int=recopy_gpta , lowercase : Optional[Any]=None , lowercase : Optional[int]=10 , lowercase : List[str]="gpt2_finetuned.pt" , ) -> Optional[int]:
_a = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
_a = RandomSampler(lowercase )
_a = DataLoader(lowercase , sampler=lowercase )
_a = max_steps // (len(lowercase )) + 1
_a = 0
_a = torch.zeros((1, context_len) , dtype=torch.long , device=lowercase )
_a , _a , _a = recopy_model(lowercase , lowercase , lowercase )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowercase )
secondary_learner.eval()
_a = []
_a = 0
_a = []
_a = []
# Compute the performance of the transformer model at the beginning
_a = compute_perplexity(lowercase , lowercase , lowercase )
test_perps.append(lowercase )
print("Test perplexity, step" , lowercase , ":" , lowercase )
for epoch in range(int(lowercase ) ):
for step, example in enumerate(lowercase ):
torch.cuda.empty_cache()
_a = random.randint(0 , example.size(2 ) - context_len - 1 )
_a = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_a = model(lowercase , labels=lowercase )
_a = True
if secondary_learner is not None:
_a = secondary_learner.forward(
torch.tensor(lowercase , dtype=torch.long , device=lowercase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowercase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_a = -1
if predicted_q < threshold:
_a = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_a = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_a = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_a = compute_perplexity(lowercase , lowercase , lowercase )
test_perps.append(lowercase )
print("Test perplexity, step" , lowercase , ":" , lowercase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowercase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
# Required parameters
parser.add_argument(
"--data_dir" , default=lowercase , type=lowercase , required=lowercase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=lowercase , type=lowercase , required=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=lowercase , default=lowercase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=lowercase , default=lowercase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=lowercase , type=lowercase , required=lowercase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=lowercase , type=lowercase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=lowercase , default=lowercase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=lowercase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=lowercase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=lowercase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=lowercase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=lowercase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=lowercase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=lowercase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=lowercase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=lowercase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=lowercase , type=lowercase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=lowercase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=lowercase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=lowercase , type=lowercase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=lowercase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
_a = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
_a = training_secondary_learner(
lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
_a = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_a , _a = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=lowercase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowercase , lowercase , lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=lowercase , secondary_learner=lowercase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
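
# The core filtering idea above, in isolation: the secondary learner predicts an
# information-gain score q for each candidate context, and the context is only
# backpropagated through when q clears the threshold (which decays after 10 batches):
#
#   do_backprop = predicted_q >= threshold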
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
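
# Minimal usage sketch (the checkpoint name comes from the vocab map above; the
# input string is illustrative):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.decode(ids, skip_special_tokens=True))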
| 346 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_freq_to_letter = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
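
# Quick usage sketch (the sample string is illustrative):
#
#   text = "SIX FROZEN MACHINES"
#   print(get_frequency_order(text))       # letters of `text` ordered by frequency
#   print(english_freq_match_score(text))  # 0..12; higher means more English-like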
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 308 |
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t in the residual graph
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path found by BFS
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308 | 1 |
def topological_sort(graph):
    """Perform a topological sort on a directed acyclic graph (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 273 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
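
# Hypothetical invocation (the paths are the script defaults declared above):
#
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522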
| 273 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
A_ : Optional[Any] = get_logger()
A_ : Optional[dict] = None
class lowercase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
"""simple docstring"""
def __init__( self ,a_=None ,a_=None ,**a_ ) -> str:
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
if isinstance(a_ ,a_ ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase : Dict = device if isinstance(a_ ,a_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase : Union[str, Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
_UpperCAmelCase : Dict = str(jax.devices()[0] )
_UpperCAmelCase : Tuple = jnp_array_kwargs
@staticmethod
def _snake_case ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(a_ ): device for device in jax.devices()}
def _snake_case ( self ,a_ ) -> Optional[Any]:
import jax
import jax.numpy as jnp
if isinstance(a_ ,a_ ) and column:
if all(
isinstance(a_ ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a_ ,axis=0 )
return column
    def _tensorize( self ,value ) -> Optional[int]:
        import jax
        import jax.numpy as jnp
        if isinstance(value ,(str, bytes, type(None )) ):
            return value
        elif isinstance(value ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value ,PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value ,**{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self ,data_struct ):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct ,torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct ,"__array__" ) and not isinstance(data_struct ,jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct ,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self ,data_struct ):
        return map_nested(self._recursive_tensorize ,data_struct ,map_list=False )
    def format_row( self ,pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self ,pa_table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column ,pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self ,pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
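# Hedged usage sketch (illustrative, not part of the original module): in
# `datasets`, a formatter like the one above is normally reached through
# `Dataset.with_format`; the toy dataset below is an assumption for
# demonstration only (requires `jax` to be installed).
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#     ds = ds.with_format("jax")
#     print(type(ds[0]["x"]))  # a jax.Array placed on the default device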
| 215 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Any = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase = """markuplm"""
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,bos_token_id=0 ,eos_token_id=2 ,max_xpath_tag_unit_embeddings=256 ,max_xpath_subs_unit_embeddings=1_024 ,tag_pad_id=216 ,subs_pad_id=1_001 ,xpath_unit_hidden_size=32 ,max_depth=50 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,) -> Union[str, Any]:
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
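# Hedged usage sketch (illustrative): with the released `transformers` API the
# equivalent configuration is built like this; the overridden value is an
# arbitrary example.
#
#     from transformers import MarkupLMConfig, MarkupLMModel
#     config = MarkupLMConfig(max_depth=50)
#     model = MarkupLMModel(config)
#     print(config.model_type)  # "markuplm"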
| 215 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
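# Hedged usage sketch (illustrative): sampling with the equivalent pipeline in
# the released `diffusers` API; "google/ddpm-cifar10-32" is a public DDPM
# checkpoint that works with the DDIM sampler, used here only as an example.
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")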
| 352 |
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot( lst ):
    return choice(lst )
def kth_number( lst , k ):
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    smaller = [e for e in lst if e < pivot]
    bigger = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(smaller ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(smaller ) < k - 1:
        return kth_number(bigger , k - len(smaller ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(smaller , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
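    # Hedged smoke test added for illustration: kth_number treats k as 1-based
    # and assumes distinct elements (duplicates of the pivot are dropped).
    assert kth_number([2, 1, 3, 4, 5], 3) == 3
    assert kth_number([9, 7, 5], 1) == 5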
| 261 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (PNDMScheduler,)
__UpperCamelCase = (("num_inference_steps", 5_0),)
def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase_)
return config
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_)
new_scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_)
SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_)
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_)
for i, t in enumerate(scheduler.prk_timesteps):
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample
return sample
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_)
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''):
scheduler.set_timesteps(lowercase_)
elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''):
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1)
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : str = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.full_loop()
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_98.13_18) < 1e-2
assert abs(result_mean.item() - 0.25_80) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 67.39_86) < 1e-2
assert abs(result_mean.item() - 0.08_78) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01)
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 2_30.03_99) < 1e-2
assert abs(result_mean.item() - 0.29_95) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01)
SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_86.94_82) < 1e-2
assert abs(result_mean.item() - 0.24_34) < 1e-3
| 91 | 0 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( ModelMixin, ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , *,
        clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim: int , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 157 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
"""simple docstring"""
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=3 , lowercase_ : Tuple=32 , lowercase_ : List[str]=3 , lowercase_ : str=10 , lowercase_ : Tuple=[10, 20, 30, 40] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]="relu" , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=None , ):
UpperCamelCase__ : Tuple =parent
UpperCamelCase__ : str =batch_size
UpperCamelCase__ : List[Any] =image_size
UpperCamelCase__ : Union[str, Any] =num_channels
UpperCamelCase__ : Dict =embeddings_size
UpperCamelCase__ : Tuple =hidden_sizes
UpperCamelCase__ : List[Any] =depths
UpperCamelCase__ : List[Any] =is_training
UpperCamelCase__ : Union[str, Any] =use_labels
UpperCamelCase__ : Optional[int] =hidden_act
UpperCamelCase__ : Dict =num_labels
UpperCamelCase__ : List[str] =scope
UpperCamelCase__ : Optional[Any] =len(lowercase_ )
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Tuple =None
if self.use_labels:
UpperCamelCase__ : str =ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Any =RegNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Union[str, Any] =model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
UpperCamelCase__ : List[str] =self.num_labels
UpperCamelCase__ : Optional[Any] =RegNetForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[int] =model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Optional[int] =RegNetModelTester(self )
UpperCamelCase__ : Optional[int] =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Dict ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowerCAmelCase ( self : Dict ):
pass
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ , UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(lowercase_ )
UpperCamelCase__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[str] =[*signature.parameters.keys()]
UpperCamelCase__ : str =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ , UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(config=lowercase_ )
for name, module in model.named_modules():
                    if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _lowerCAmelCase ( self : List[Any] ):
def check_hidden_states_output(lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[Any] ):
UpperCamelCase__ : int =model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : List[str] =model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCamelCase__ : Tuple =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : Optional[int] =self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCamelCase__ , UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[Any] =['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ : Dict =layer_type
UpperCamelCase__ : Dict =True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : int =True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] =RegNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : int ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : List[Any] =RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.default_image_processor
UpperCamelCase__ : Any =prepare_img()
UpperCamelCase__ : Optional[Any] =image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict =model(**lowercase_ )
# verify the logits
UpperCamelCase__ : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCamelCase__ : Union[str, Any] =torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 157 | 1 |
MOD_ADLER = 65_521
def adler32( plain_text: str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
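# Hedged cross-check (added for illustration): the standard library computes
# the same checksum over bytes, so the two implementations should agree on
# ASCII input.
if __name__ == "__main__":
    import zlib
    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300_286_872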
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 161 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, field=None, num_proc=None, **kwargs, ) -> Tuple:
        """simple docstring"""
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read( self ) -> Dict:
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    '''simple docstring'''
    def __init__( self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs, ) -> int:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write( self ) -> Optional[int]:
        """simple docstring"""
        _ = self.to_json_kwargs.pop('''path_or_buf''', None )
        orient = self.to_json_kwargs.pop('''orient''', '''records''' )
        lines = self.to_json_kwargs.pop('''lines''', True if orient == '''records''' else False )
        index = self.to_json_kwargs.pop('''index''', False if orient in ['''split''', '''table'''] else True )
        compression = self.to_json_kwargs.pop('''compression''', None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression" )
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf, '''wb''', compression=compression ) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    ''' was passed. Please provide a local path instead.''' )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs )
        return written
    def _batch_json( self, args ) -> Dict:
        """simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size ), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write( self, file_obj, orient, lines, index, **to_json_kwargs, ) -> Union[str, Any]:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset ), self.batch_size ), unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size )], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                    written += file_obj.write(json_str )
        return written
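# Hedged usage sketch (illustrative): these classes back the public JSON round
# trip in `datasets`; the file name is an example only.
#
#     from datasets import Dataset, load_dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_json("out.jsonl", lines=True)                 # JsonDatasetWriter
#     ds2 = load_dataset("json", data_files="out.jsonl")  # JsonDatasetReader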
| 357 |
from __future__ import annotations
import bisect
def bisect_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection: list[int] , item: int ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection: list[int] , item: int ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
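# Hedged usage sketch (added for illustration): the insort helpers keep a list
# sorted as items arrive.
#
#     >>> nums = [1, 3, 5]
#     >>> insort_left(nums, 4)
#     >>> nums
#     [1, 3, 4, 5]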
| 247 | 0 |
def fizz_buzz( number: int , iterations: int ) -> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
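    # Hedged smoke test added for illustration: plays FizzBuzz from 1 for three
    # iterations; note that each entry is followed by a trailing space.
    assert fizz_buzz(1, 3) == "1 2 Fizz "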
| 253 |
def upper( word: str ) -> str:
    """simple docstring"""
    return "".join(chr(ord(char ) - 3_2 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
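    # Hedged smoke test added for illustration: only ASCII lowercase letters
    # are shifted; everything else passes through unchanged.
    assert upper("wow us!") == "WOW US!"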
| 253 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowercase( UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
# vision encoder
if "img_encoder.pos_embed" in name:
UpperCamelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
UpperCamelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
UpperCamelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
UpperCamelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
UpperCamelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
UpperCamelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
UpperCamelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
UpperCamelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
UpperCamelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
UpperCamelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
UpperCamelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
UpperCamelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
UpperCamelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
UpperCamelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
UpperCamelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
UpperCamelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
UpperCamelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
UpperCamelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
UpperCamelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
UpperCamelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
UpperCamelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
UpperCamelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
UpperCamelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
UpperCamelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCamelCase = key.split(""".""" )
UpperCamelCase , UpperCamelCase = int(key_split[2] ), int(key_split[4] )
UpperCamelCase = config.vision_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCamelCase = key.split(""".""" )
UpperCamelCase = int(key_split[3] )
UpperCamelCase = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
UpperCamelCase = val.squeeze_()
else:
UpperCamelCase = val
return orig_state_dict
def lowercase( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="groupvit-gcc-yfcc" , UpperCamelCase_=False ) -> str:
'''simple docstring'''
UpperCamelCase = GroupViTConfig()
UpperCamelCase = GroupViTModel(UpperCamelCase_ ).eval()
UpperCamelCase = torch.load(UpperCamelCase_ , map_location="""cpu""" )["""model"""]
UpperCamelCase = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase , UpperCamelCase = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
UpperCamelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCamelCase = prepare_img()
UpperCamelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="""pt""" )
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
UpperCamelCase = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
UpperCamelCase = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(f"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("""Successfully saved processor and model to""" , UpperCamelCase_ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(UpperCamelCase_ , organization="""nielsr""" )
model.push_to_hub(UpperCamelCase_ , organization="""nielsr""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 165 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput( BaseOutput ):
    sample: torch.FloatTensor
class UNet1DModel( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , sample_size: int = 65_536 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: Tuple[str] = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ):
        """simple docstring"""
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Union[torch.Tensor, float, int] , lowerCamelCase_ : bool = True , ):
"""simple docstring"""
UpperCamelCase = timestep
if not torch.is_tensor(lowerCamelCase_ ):
UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
UpperCamelCase = timesteps[None].to(sample.device )
UpperCamelCase = self.time_proj(lowerCamelCase_ )
if self.config.use_timestep_embedding:
UpperCamelCase = self.time_mlp(lowerCamelCase_ )
else:
UpperCamelCase = timestep_embed[..., None]
UpperCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCamelCase = ()
for downsample_block in self.down_blocks:
UpperCamelCase , UpperCamelCase = downsample_block(hidden_states=lowerCamelCase_ , temb=lowerCamelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCamelCase = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCamelCase = down_block_res_samples[-1:]
UpperCamelCase = down_block_res_samples[:-1]
UpperCamelCase = upsample_block(lowerCamelCase_ , res_hidden_states_tuple=lowerCamelCase_ , temb=lowerCamelCase_ )
# 5. post-process
if self.out_block:
UpperCamelCase = self.out_block(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase_ )
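# Illustrative usage sketch (added, not part of the original file): drives the
# forward pass above through diffusers' public `UNet1DModel` wrapper. The
# import path and all sizes are assumptions for demonstration; wrapped in a
# function so nothing runs at import time.
def _unet1d_usage_example():
    import torch
    from diffusers import UNet1DModel

    model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
    noisy_sample = torch.randn(1, 2, 256)  # (batch, channels, length)
    denoised = model(noisy_sample, torch.tensor([10])).sample  # same shape out
    assert denoised.shape == noisy_sample.shape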
| 165 | 1 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    # d2d3d4 must be divisible by 2, i.e. d4 (num[3]) must be even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 iff its digit sum is
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 iff d6 (num[5]) is 0 or 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
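# Quick sanity check (added): 1406357289 is the canonical pandigital number
# from the Project Euler 43 statement, so it must satisfy the property.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))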
if __name__ == "__main__":
print(f"""{solution() = }""")
| 61 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
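# Minimal usage sketch (added): the restored defaults above.
_example_config = Data2VecVisionConfig()
assert _example_config.hidden_size == 768 and _example_config.image_size == 224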
| 123 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn trailing "--key value" pairs into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
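# Example (added): trailing "--key value" pairs become kwargs for the command.
assert parse_unknown_args(["--num_proc", "4"]) == {"num_proc": "4"}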
if __name__ == "__main__":
main()
| 363 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamic import to avoid a circular dependency at module load time
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 7 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like "180" or "身高" or "神"
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            L = min(end - start, max_word_len)
            for i in range(L, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
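# Worked example (added, illustrative): if BERT tokenizes a sentence into
# ["我", "爱", "北", "京"] and LTP segments out the word "北京", the continuation
# character is marked so whole-word masking can treat "北京" as one unit.
assert add_sub_symbol(["我", "爱", "北", "京"], {"北京"}) == ["我", "爱", "北", "##京"]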
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 211 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
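# Minimal usage sketch (added): stage names are derived from the depths.
_example_config = ResNetConfig(depths=[3, 4, 6, 3])
assert _example_config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]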
| 211 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
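# Shape-check sketch (added, illustrative): with the default block_out_channels
# (16, 32, 96, 256) there are three stride-2 convolutions, so a 512x512 NHWC
# conditioning image comes out 8x smaller. Wrapped in a function so nothing
# runs at import time; the concrete sizes are assumptions for demonstration.
def _conditioning_embedding_example():
    embedder = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=320)
    cond = jnp.zeros((1, 512, 512, 3))
    params = embedder.init(jax.random.PRNGKey(0), cond)
    out = embedder.apply(params, cond)
    assert out.shape == (1, 64, 64, 320)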
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 355 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 218 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only record leaf operations (no submodules) and normalization layers
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
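# Minimal illustration (added): ModuleTransfer aligns traced leaf ops, so two
# architecturally identical stacks can exchange weights. Wrapped in a function
# so it does not run at import time; the toy modules are assumptions.
def _module_transfer_example():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 32, 32))
    assert torch.allclose(src[0].weight, dest[0].weight)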
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 0 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
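# Concrete cases (added): 5^2 = 25 and 76^2 = 5776 end in the number itself,
# while 7^2 = 49 does not.
assert is_automorphic_number(5) and is_automorphic_number(76)
assert not is_automorphic_number(7)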
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a second absence, or a third consecutive late day, forfeits the prize
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
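# Cross-check (added): for a 4-day term, brute force over all on-time/late/
# absent strings with at most one absence and no triple-late agrees with the
# memoised recursion above.
from itertools import product

assert solution(4) == sum(
    1 for s in product("OLA", repeat=4) if s.count("A") < 2 and "LLL" not in "".join(s)
)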
if __name__ == "__main__":
print(solution())
| 191 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

input_string = "aab"
pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'{input_string} matches the given pattern {pattern}')
else:
print(f'{input_string} does not match with the given pattern {pattern}')
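# Additional spot checks (added): '.' matches any single character and 'x*'
# matches zero or more occurrences of 'x'.
assert match_pattern("ab", "a.")
assert match_pattern("b", "a*b")
assert not match_pattern("aab", "a*")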
| 178 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178 | 1 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
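# Worked example (added): with prices [1, 5, 8, 9], a rod of length 4 is best
# cut into two pieces of length 2 (5 + 5 = 10), beating the uncut price of 9.
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10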
if __name__ == "__main__":
main()
| 26 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
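# The whole graph is reachable from "A" (added check):
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}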
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = """maskformer"""
a_ : Optional[int] = {"""hidden_size""": """mask_feature_size"""}
a_ : Optional[int] = ["""resnet""", """swin"""]
a_ : int = ["""detr"""]
def __init__( self : str , a_ : int = 2_56 , a_ : int = 2_56 , a_ : float = 0.1 , a_ : bool = False , a_ : Optional[Dict] = None , a_ : Optional[Dict] = None , a_ : float = 0.02 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 20.0 , a_ : Optional[bool] = None , **a_ : str , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCAmelCase_ : Tuple = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Optional[Any] = backbone_config.pop("model_type" )
lowerCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ : str = config_class.from_dict(a_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCAmelCase_ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowerCAmelCase_ : Optional[Any] = (
decoder_config.pop("model_type" ) if isinstance(a_ , a_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowerCAmelCase_ : List[Any] = config_class.from_dict(a_ )
lowerCAmelCase_ : str = backbone_config
lowerCAmelCase_ : Tuple = decoder_config
# main feature dimension for the model
lowerCAmelCase_ : str = fpn_feature_size
lowerCAmelCase_ : str = mask_feature_size
# initializer
lowerCAmelCase_ : List[Any] = init_std
lowerCAmelCase_ : Tuple = init_xavier_std
# Hungarian matcher && loss
lowerCAmelCase_ : int = cross_entropy_weight
lowerCAmelCase_ : Dict = dice_weight
lowerCAmelCase_ : int = mask_weight
lowerCAmelCase_ : Any = use_auxiliary_loss
lowerCAmelCase_ : Dict = no_object_weight
lowerCAmelCase_ : Optional[int] = output_auxiliary_logits
lowerCAmelCase_ : int = self.decoder_config.encoder_attention_heads
lowerCAmelCase_ : str = self.decoder_config.num_hidden_layers
super().__init__(**a_ )
@classmethod
def lowerCamelCase ( cls : int , a_ : PretrainedConfig , a_ : PretrainedConfig , **a_ : Tuple ):
return cls(
backbone_config=a_ , decoder_config=a_ , **a_ , )
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
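
# Minimal usage sketch (added for illustration; runs only inside the transformers
# package where the relative imports above resolve):
#
#     config = MaskFormerConfig()              # Swin backbone + DETR decoder defaults
#     config_dict = config.to_dict()           # nested backbone/decoder configs become plain dicts
#     restored = MaskFormerConfig.from_dict(config_dict)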
| 241 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
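
# Tiny illustrative check (added; not part of the original module): two RGB pixels
# quantized against a two-color palette each map to their nearest cluster index.
#
#     _palette = np.array([[0, 0, 0], [255, 255, 255]])
#     _pixels = np.array([[[10, 10, 10], [250, 240, 245]]])
#     color_quantize(_pixels, _palette)  # -> array([0, 1])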
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["input_ids"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # maps pixel values from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
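
# Usage sketch (added; illustrative — a random palette stands in for the real
# ImageGPT color clusters):
#
#     _clusters = np.random.randint(0, 256, size=(16, 3))
#     _processor = ImageGPTImageProcessor(clusters=_clusters, size={"height": 32, "width": 32})
#     _batch = _processor(images=PIL.Image.new("RGB", (64, 64)), return_tensors="np")
#     _batch["input_ids"].shape  # -> (1, 32 * 32)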
| 356 | """simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
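
    # Illustrative round trip (added; not part of the original file):
    assert decode(encode("flee at once")) == "flee at once"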
| 321 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name: str) -> torch.Tensor:
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct: dict, old: str, new: str) -> None:
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict: dict) -> list:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name: str, pytorch_dump_folder_path: str, original_ckpt: str) -> None:
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
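
# Example invocation (added; illustrative — the script name and checkpoint path
# are placeholders):
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs \
#         --original_ckpt /path/to/swiftformer_xs.pth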
| 53 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
a__ : Dict =Node(1)
a__ : Optional[int] =Node(2)
a__ : List[str] =Node(3)
a__ : Optional[int] =Node(4)
print(root_node.has_loop) # False
a__ : str =root_node.next_node
print(root_node.has_loop) # True
a__ : Optional[int] =Node(5)
a__ : List[Any] =Node(6)
a__ : int =Node(5)
a__ : Tuple =Node(6)
print(root_node.has_loop) # False
a__ : str =Node(1)
print(root_node.has_loop) # False
| 53 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 358 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score is the number of characters already matching the target at their position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__UpperCAmelCase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCAmelCase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 42 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 265 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 265 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 365 | """simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
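
    # Illustrative extra check (added; not part of the original file):
    # integrate y' = y with y(0) = 1 over [0, 1]; the endpoint approximates e.
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~ 2.718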
| 54 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
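
# Usage sketch (added; illustrative):
#
#     config = Speech2Text2Config(vocab_size=10000, d_model=256)
#     config.num_attention_heads  # resolved to decoder_attention_heads via attribute_map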
| 317 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the report files inside the downloaded CI artifacts."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
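
# Example (added; illustrative — the artifact name is a placeholder and a GitHub
# token with read access to the repository is assumed):
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["run_all_tests_gpu_test_reports"],
#         output_dir="ci_artifacts",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )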
| 64 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 350 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 73 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 33 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
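
# Illustrative usage (added example; not part of the original module). The
# checkpoint id matches the one exercised in the tests below; the blank PIL
# image is a stand-in assumption so the snippet stays self-contained.
if __name__ == "__main__":
    from PIL import Image

    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (640, 480))  # stand-in image
    # One inner list of text queries per image in the batch; shorter lists are
    # padded to the longest one, as implemented in `__call__` above.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']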
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max(len(texts) for texts in input_texts)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # `get_activation` must return fresh instances; otherwise an attribute
        # set on one activation would leak onto the other.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # String values that look like bools/ints/floats should be cast.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
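
# Added non-interactive example (illustration only; the sample points are an
# assumption, not part of the original script). Interpolates f(1.5) from four
# equally spaced samples of f(x) = x**2; the exact answer is 2.25.
def demo() -> None:
    x = [0.0, 1.0, 2.0, 3.0]
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = x[i] ** 2
    value = 1.5
    u = (value - x[0]) / (x[1] - x[0])
    # forward difference table, same construction as in main()
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")  # -> 2.25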
from math import isqrt
def is_prime(number: int) -> bool:
    """Determine whether `number` is prime by trial division up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below `max_prime` that can be written as the
    difference of two consecutive cubes, (n + 1)**3 - n**3 = 3n**2 + 3n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
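
# Added sanity check (illustration only): below 100 the differences of
# consecutive cubes are 7, 19, 37, 61 and 91; all but 91 = 7 * 13 are prime,
# so solution(100) must return 4.
assert solution(100) == 4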
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def __lowercase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def __lowercase ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def __lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = DDIMScheduler()
lowerCAmelCase = self.dummy_vq_model
lowerCAmelCase = LDMPipeline(unet=lowerCAmelCase , vqvae=lowerCAmelCase , scheduler=lowerCAmelCase )
ldm.to(lowerCAmelCase )
ldm.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = ldm(generator=lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" ).images
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = ldm(generator=lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=lowerCAmelCase )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
lowerCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : Dict ):
lowerCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCAmelCase )
ldm.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = ldm(generator=lowerCAmelCase , num_inference_steps=5 , output_type="""numpy""" ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
lowerCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
a = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def lowercase (snake_case__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase = torch.load(snake_case__ , map_location="""cpu""" )
return sd
def lowercase (snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=rename_keys_prefix ) -> Dict:
'''simple docstring'''
lowerCAmelCase = OrderedDict()
lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCAmelCase = key
for name_pair in rename_keys_prefix:
lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowerCAmelCase = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def lowercase (snake_case__ : List[Any] , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase = """pretraining"""
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
lowerCAmelCase = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
lowerCAmelCase = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
lowerCAmelCase = """vqa"""
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
lowerCAmelCase = """nlvr"""
lowerCAmelCase = VisualBertConfig(**snake_case__ )
# Load State Dict
lowerCAmelCase = load_state_dict(snake_case__ )
lowerCAmelCase = get_new_dict(snake_case__ , snake_case__ )
if model_type == "pretraining":
lowerCAmelCase = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
lowerCAmelCase = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
lowerCAmelCase = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
lowerCAmelCase = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
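
# Example invocation (added note; the script filename and output directory are
# assumptions shown for illustration):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa-pre-trained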
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Additionally evaluates on the train split at the end of each epoch."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
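
# Example invocation (added note; the script name and flag values are
# assumptions shown for illustration):
#
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --freeze True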
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # Position of the parent node in an array-backed binary heap
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights, with key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
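

if __name__ == "__main__":
    # Added usage example (illustration only; the graph and edge weights are
    # assumptions, not part of the original module). Expected values follow
    # the relaxation rule implemented above (dist accumulates path length).
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "d", 5)
    graph.add_edge("a", "c", 15)
    graph.add_edge("b", "d", 100)
    dist, parent = prims_algo(graph)
    print(dist)    # expected: {'a': 0, 'b': 3, 'c': 13, 'd': 18}
    print(parent)  # expected: {'a': None, 'b': 'a', 'c': 'b', 'd': 'c'}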
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays the bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
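
# Added usage note (illustration only; the import path is an assumption based
# on this module's location inside `accelerate`):
#
#   from accelerate.utils.tqdm import tqdm
#
#   # The first positional argument is `main_process_only`; everything else is
#   # forwarded to `tqdm.auto.tqdm`.
#   for step in tqdm(True, range(1000), desc="training"):
#       ...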
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Student-side key names follow DistilBERT's parameter naming.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW model."""

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
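# Minimal usage sketch (illustrative, not part of the original file): the
# default configuration downsamples raw audio by the product of the conv
# strides, exposed via `inputs_to_logits_ratio`.
#
#   config = SEWConfig()
#   config.hidden_size             # 768
#   config.inputs_to_logits_ratio  # 5 * 2**6 == 320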
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
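# Illustrative end-to-end use of the processor under test (the audio shape is
# an arbitrary choice for the example):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text="a sound of a cat", audios=floats_list((1, 1_000)), return_tensors="np")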
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Map 1-based alphabet positions back to a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
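# Round-trip sanity check (illustrative):
#   encode("hello")            -> [8, 5, 12, 12, 15]
#   decode([8, 5, 12, 12, 15]) -> "hello"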
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
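# Both tests above are decorated with @slow, so in the transformers test suite
# they only run when slow tests are enabled, e.g. (illustrative invocation):
#
#   RUN_SLOW=1 pytest tests/models/marian/test_tatoeba_conversion.py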
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
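# Example invocation (script name and paths are illustrative):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech_large.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --not_finetuned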
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Count the ways a row measuring `length` units can be filled with red blocks
    of minimum length three, any two blocks separated by at least one grey
    square (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
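# The recurrence fixes the leftmost block: a block of length >= 3 placed at
# offset `block_start` leaves an independent sub-row of length
# row_length - block_start - block_length - 1 (the extra -1 is the mandatory
# grey separator). Sanity check from the problem statement: solution(7) == 17.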
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
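# Illustrative use of the truncation feature exercised in `test_truncation`
# above (checkpoint as in the test; the input string is arbitrary):
#
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer.encode("result = a + b\n\n\n# trailing comment")
#   tokenizer.decode(ids, truncate_before_pattern=["\n\n\n", "^#"])  # -> "result = a + b"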
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the maximum Parquet row group size for the given features, or None
    to use the default. Image, audio and binary columns get smaller row groups
    so that random access to one row does not require reading a huge row group."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
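# Minimal usage sketch (file name is illustrative):
#
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()  # returns the (approximate) number of bytes written
#   reloaded = ParquetDatasetReader("out.parquet", split=NamedSplit("train")).read()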
from ..utils import DummyObject, requires_backends


# Dummy / placeholder objects: every class and function below raises an
# informative ImportError via `requires_backends` when `torch` is not
# installed.


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    requires_backends(lowerCAmelCase__, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : int) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Dict , **lowercase_ : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[str] , **lowercase_ : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Tuple , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
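# The stub classes above follow the "dummy objects" pattern: they can be imported
# without torch installed and only raise when actually used. A minimal sketch of
# the guard they all call (illustrative only; the real requires_backends in the
# library is more general and has richer error messages):
import importlib.util


def requires_backends_sketch(obj, backends):
    # Raise a helpful ImportError the moment a backend-gated object is touched.
    for backend in backends:
        if importlib.util.find_spec(backend) is None:
            name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
            raise ImportError(f"{name} requires the `{backend}` library, which is not installed.")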
| 63 |
import math


class SelfOrganizingMap:
    """A two-cluster Kohonen self-organizing map."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the row of ``weights`` closest to
        ``sample`` by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # index of the closer (winning) weight vector
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning vector ``j`` a step of size ``alpha`` towards ``sample``."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')


# running the main() function
if __name__ == "__main__":
    main()
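# Worked example (illustrative): for the first training sample [1, 1, 0, 0] and the
# initial weights above, the squared distances are
#   d0 = (1 - 0.2)^2 + (1 - 0.6)^2 + (0 - 0.5)^2 + (0 - 0.9)^2 = 1.86
#   d1 = (1 - 0.8)^2 + (1 - 0.4)^2 + (0 - 0.7)^2 + (0 - 0.3)^2 = 0.98
# so get_winner returns 1 and update() pulls weights[1] halfway (alpha = 0.5)
# towards the sample.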
| 63 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
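# The block above registers names lazily so that importing the package does not pull
# in torch or TensorFlow. A stripped-down sketch of the idea behind _LazyModule
# (illustrative only; the real implementation also handles submodules and __spec__):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        # Import the defining submodule only on first attribute access.
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)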
| 237 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
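# A minimal sketch of how a callback like the one tested above is attached in
# practice (illustrative; reuses the TrainerCallback import at the top of the file):
class PrintLossCallback(TrainerCallback):
    """Print the training loss every time the Trainer fires an on_log event."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and 'loss' in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")


# trainer = Trainer(model, training_args, callbacks=[PrintLossCallback()])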
| 195 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the user dict out of one of the page's embedded JSON script blobs."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the embedded user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["username"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["full_name"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["biography"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["business_email"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["external_url"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["is_verified"]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("""CI"""):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 368 |
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
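# Usage sketch for the circular queue above (capacity 3, values illustrative):
# queue = CircularQueueLinkedList(3)
# queue.enqueue('a'); queue.enqueue('b')
# queue.first()    # 'a'
# queue.dequeue()  # 'a'
# queue.dequeue()  # 'b'
# queue.dequeue()  # raises Exception('Empty Queue')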
| 261 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/esm2_t6_8M_UR50D''': 1024,
    '''facebook/esm2_t12_35M_UR50D''': 1024,
}


def load_vocab_file(vocab_file) -> List[str]:
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # Amino-acid tokens are expected to be whitespace separated.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
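# Usage sketch (illustrative): _tokenize splits on whitespace, so protein sequences
# are expected as space-separated residues. Assuming a vocab.txt with one token per
# line (e.g. <cls>, <pad>, <eos>, <unk>, L, A, G, ...):
# tokenizer = EsmTokenizer('vocab.txt')
# tokenizer.tokenize('M K T A')                        # -> ['M', 'K', 'T', 'A']
# tokenizer.build_inputs_with_special_tokens([5, 6])   # -> [cls_id, 5, 6, eos_id]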
| 157 |
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
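# Example calls for the join helper above (illustrative):
# join('', ['a', 'b', 'c', 'd'])          # -> 'abcd'
# join('#', ['a', 'b', 'c', 'd'])         # -> 'a#b#c#d'
# join(' ', ['You', 'are', 'amazing!'])   # -> 'You are amazing!'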
| 157 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
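# A minimal concrete command built on the abstract base above (illustrative sketch;
# it assumes the object passed to register_subcommand is the sub-parsers action,
# which is how such bases are typically wired up, and the command name is made up):
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser('echo')
        echo_parser.add_argument('text')
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)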
| 319 |
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, otherwise 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
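# Other gates compose naturally with or_gate; a NOR gate is just its negation
# (illustrative sketch):
def nor_gate(input_1: int, input_2: int) -> int:
    return int(not or_gate(input_1, input_2))


# nor_gate(0, 0) == 1; nor_gate(0, 1) == 0; nor_gate(1, 1) == 0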
| 319 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
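# Example calls (illustrative; actual output depends on the contents of words.txt):
# signature('dormitory')  # -> 'dimoorrty'
# anagram('post')         # e.g. ['post', 'pots', 'spot', 'stop', 'tops']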
| 299 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''',
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
            audio = kwargs.pop('''raw_speech''')
        else:
            audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('''input_features''', None)
        labels = kwargs.pop('''labels''', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.'''
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
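# Usage sketch (illustrative): pairing audio features and text labels for CTC
# training with the processor above. The checkpoint name is only an example.
# processor = WavaVecaProcessor.from_pretrained('facebook/wav2vec2-base-960h')
# inputs = processor(audio=raw_audio_array, sampling_rate=16_000, return_tensors='pt')
# labels = processor(text='HELLO WORLD').input_ids  # tokenized transcription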
| 299 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root'''))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root'''))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
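# Quick interactive check of the machinery the tests above exercise (illustrative):
# readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
# readme.validate()  # passes silently for a well-formed card
# ReadMe.from_string(README_NO_YAML, example_yaml_structure).validate()  # raises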
| 75 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__A = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
__A = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__A = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
__A = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
__A = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
__A = ''
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
assert ReadMe.from_string(_lowercase , _lowercase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ):
_A = ReadMe.from_string(_lowercase , _lowercase )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(_lowercase , _lowercase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase ):
'''simple docstring'''
ReadMe.from_string(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
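
# Usage sketch (added for illustration, not part of the original test suite): the
# same API the tests above exercise can be driven directly, with parsing errors
# suppressed so the partially parsed tree can be inspected.
#
#     readme = ReadMe.from_string(README_MULTIPLE_SAME_HEADING_1, example_yaml_structure, suppress_parsing_errors=True)
#     print(readme.to_dict()["subsections"])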
| 75 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = '''timesformer'''
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
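
# Usage sketch (added for illustration, not part of the original file): the config
# is a plain container, so the video-specific fields can be overridden at
# construction time.
if __name__ == "__main__":
    config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
    print(config.num_frames, config.hidden_size)  # 16 768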
| 3 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given sample x[n]; the protocol default returns silence."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Find the lowest and highest gain values to display, clamped to at least +/-20 dB."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (dB) of the filter's impulse response on a log frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('''Gain (dB)''')
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the unwrapped phase shift of the filter's impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
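
# Usage sketch (added for illustration): any object with a `process(sample) -> float`
# method satisfies the FilterType protocol; an identity filter shows a flat 0 dB
# gain curve and zero phase shift.
if __name__ == "__main__":
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)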
| 75 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """vit"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-4
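
# Usage sketch (added for illustration): the ONNX config advertises the expected
# input layout and the numerical tolerance used when validating an export.
if __name__ == "__main__":
    onnx_config = ViTOnnxConfig(ViTConfig())
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    print(onnx_config.atol_for_validation)  # 0.0001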
| 334 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer( self , **kwargs ) -> DebertaTokenizer:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts( self , tokenizer ) -> tuple:
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids( self ) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)
    @slow
    def test_sequence_builders( self ) -> None:
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration( self ) -> None:
        '''simple docstring'''
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]
            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(decoded, expected)
| 334 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row and record the characters-per-token ratio."""
    output = {}
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
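
# Invocation sketch (added for illustration; the flag names come from
# PretokenizationArguments, and the repo/dataset ids below are hypothetical):
#   python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo tokenized-codeparrot-train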
| 253 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification head from the s3prl downstream weights."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification (diarization) head from the s3prl weights."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an x-vector (speaker verification) head from the s3prl weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Dispatch on the configured architecture and save the converted model."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
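
# Invocation sketch (added for illustration; the script name and paths below are
# hypothetical, the flags are the ones registered above):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream_checkpoint.ckpt \
#       --model_dump_path ./converted_model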
| 253 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
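
# Worked example (added for illustration): with L = 10 mH and C = 1 uF,
#   f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*sqrt(1e-2 * 1e-6)) ≈ 1591.55 Hz
#   >>> resonant_frequency(inductance=10e-3, capacitance=1e-6)[1]  # ≈ 1591.55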
| 351 |
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the smallest repunit divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    """Find the least odd divisor n with A(n) > limit (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"{solution() = }")
| 19 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred( self , score , x , t , generator=None ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
    def __len__( self ):
        '''simple docstring'''
        return self.config.num_train_timesteps
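
# Usage sketch (added for illustration): one reverse-diffusion loop with a
# stand-in "score" value; a real score network would go where `-sample` is used.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        fake_score = -sample  # placeholder for model(sample, t)
        sample, sample_mean = scheduler.step_pred(fake_score, sample, t.expand(sample.shape[0]))
    print(sample_mean.shape)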
| 184 |
class Graph:  # Public class to implement a graph
    """simple docstring"""
    def __init__( self , row: int , col: int , graph: list[list[bool]] ):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i: int , j: int , visited: list[list[bool]] ) -> bool:
        '''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
    def diffs( self , i: int , j: int , visited: list[list[bool]] ) -> None:
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
    def count_islands( self ) -> int:  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
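
# Usage sketch (added for illustration): two islands under 8-directional
# connectivity -- the top-left blob and the lone bottom-right cell.
if __name__ == "__main__":
    matrix = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, matrix).count_islands())  # 2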
| 184 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data

    def __iter__( self ):
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator(even_batches: bool = True):
    '''simple docstring'''
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    '''simple docstring'''
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    '''simple docstring'''
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    '''simple docstring'''
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    '''simple docstring'''
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    '''simple docstring'''
    accelerator = create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""")
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""")
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""")
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""")
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
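
# Launch sketch (added for illustration): create_accelerator() asserts exactly
# two processes, so this script is meant to run under a two-process launcher,
# e.g. (script filename hypothetical):
#   accelerate launch --num_processes 2 test_distributed_data_loop.py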
| 352 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp( self ) -> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.5, 0.5, 0.5],
            """image_std""": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, """w""", encoding="""utf-8""") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer( self , **kwargs ) -> BertTokenizer:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor( self , **kwargs ) -> ViTImageProcessor:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown( self ) -> None:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs( self ) -> list:
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features( self ) -> None:
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 159 | 0 |