| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    """Constructs a CLIPSeg processor which wraps a ViT image processor and a CLIP tokenizer
    into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
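A minimal usage sketch for the processor above; the checkpoint name is an assumption (CLIPSeg checkpoints such as "CIDAS/clipseg-rd64-refined" ship a processor of this class), and the image path is illustrative:

# Sketch, not part of the original file; checkpoint name and image path are assumptions.
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("cats.png")
inputs = processor(text="a cat", images=image, return_tensors="pt")
# `inputs` now holds both `input_ids` from the tokenizer and `pixel_values` from the image processor.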
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the distinct paths from the top-left to the bottom-right cell of
    ``grid``, moving up, down, left or right, never revisiting a cell, and
    avoiding cells whose value is 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
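A quick usage sketch (the grid below is illustrative; 1 marks a blocked cell):

# Sketch, not part of the original file.
grid = [
    [0, 0, 0],
    [1, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(grid, 0, 0, set()))  # 1 -- the only route hugs the top and right edges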
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
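With the lazy structure above, a caller only pays the import cost of the submodule it actually touches; a minimal sketch of the observable behavior (assuming the package is installed):

# Sketch, not part of the original file. Importing the config class resolves only the
# configuration submodule via _LazyModule; the heavy torch-backed modeling code stays unloaded.
from transformers.models.layoutlmv2 import LayoutLMv2Config

config = LayoutLMv2Config()  # default hyperparameters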
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units given by full name or symbol."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    # Accept either the full unit name or its symbol.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
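For example, converting 4 kilometers to meters multiplies by 10^(3 - 0):

# Sketch, not part of the original file.
print(length_conversion(4, "kilometer", "meter"))  # 4000.0
print(length_conversion(1, "meter", "km"))         # 0.001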
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # The projection head is only used during self-supervised pre-training in MSN.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
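A brief usage sketch with one of the checkpoints named in the maps above:

# Sketch, not part of the original file.
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("ConvBERT is a convolution-augmented BERT.", return_tensors="pt")
print(enc["input_ids"].shape)  # torch.Size([1, sequence_length])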
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column] without being
    attacked along its row, column, or upper diagonals."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column is safe."""
    if row >= len(board):
        # Store a copy: the board itself is mutated back to zeros while backtracking.
        solution.append([r[:] for r in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
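As a quick sanity check (sketch; the well-known counts are 2 solutions for n = 4 and 92 for n = 8):

# Sketch, not part of the original file.
solution.clear()
small_board = [[0 for i in range(4)] for j in range(4)]
solve(small_board, 0)
print(len(solution))  # 2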
def solution() -> int:
    """Project Euler problem 40: build the digits of Champernowne's constant
    0.123456789101112... and return the product d1 * d10 * d100 * ... * d1000000."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
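As a worked check: the relevant digits of 0.123456789101112... are d1 = 1, d10 = 1, d100 = 5, d1000 = 3, d10000 = 7, d100000 = 2 and d1000000 = 1, so the product is 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.

# Sketch, not part of the original file.
assert solution() == 210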
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            # pull the nested vision sub-config out of a full BridgeTower config
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the text encoder of a BridgeTower model."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    """Top-level configuration combining a text and a vision sub-configuration."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
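A small sketch showing how the composite config is assembled from its two sub-configs:

# Sketch, not part of the original file.
text_config = BridgeTowerTextConfig(vocab_size=50265)
vision_config = BridgeTowerVisionConfig(image_size=288)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.vocab_size, config.vision_config.image_size)  # 50265 288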
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read Visual Genome object and attribute vocabularies from text files."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    """Load a pickled Detectron-style checkpoint and convert arrays to tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r


class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs


def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s"
                % (url, temp_file.name)
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
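A tiny usage sketch for the batching helper above (file names are illustrative):

# Sketch, not part of the original file.
images = ["img0.jpg", "img1.jpg", "img2.jpg"]
for batch in chunk(images, batch=2):
    print(batch)  # ['img0.jpg', 'img1.jpg'] then ['img2.jpg']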
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy (or Strong) number equals the sum of the factorials of
    its digits, e.g. 145 = 1! + 4! + 5!."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
    )
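For example, 145 = 1! + 4! + 5! = 1 + 24 + 120, so:

# Sketch, not part of the original file.
assert krishnamurthy(145) is True
assert krishnamurthy(144) is False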
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('''glue''', '''mrpc''')
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('''epoch_''')[1]
        state_epoch_num = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('''resumed checkpoint performance:''', accuracy)
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0])
        accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), '''r''') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), '''w''') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=False,
    )
    parser.add_argument(
        '''--output_dir''',
        type=str,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--resume_from_checkpoint''',
        type=str,
        default=None,
        help='''If the training should continue from a checkpoint folder.''',
    )
    parser.add_argument(
        '''--partial_train_epoch''',
        type=int,
        default=None,
        help='''If passed, the training will stop after this number of epochs.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=int,
        default=2,
        help='''Number of train epochs.''',
    )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
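# Illustrative sketch (not part of the original script): the resume logic in
# training_function recovers the epoch index from checkpoint folders named by
# accelerator.save_state, e.g. "epoch_12". The helper name below is hypothetical.
def parse_epoch_from_checkpoint(path: str) -> int:
    epoch_string = path.split('''epoch_''')[1]
    digits = ''''''
    for char in epoch_string:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits)


assert parse_epoch_from_checkpoint('''ckpts/epoch_12''') == 12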
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class SCREAMING_SNAKE_CASE_ ( __lowerCamelCase ):
"""simple docstring"""
_a = 'pegasus'
_a = ['past_key_values']
_a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A=5_02_65 , A=10_24 , A=12 , A=40_96 , A=16 , A=12 , A=40_96 , A=16 , A=0.0 , A=0.0 , A=True , A=True , A="gelu" , A=10_24 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0 , A=False , A=0 , A=1 , A=1 , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
        return self.d_model
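# Usage sketch (assumption: this class mirrors transformers' PegasusConfig): the
# attribute map above aliases hidden_size and num_attention_heads onto the d_model and
# encoder_attention_heads arguments.
from transformers import PegasusConfig

pegasus_config = PegasusConfig(d_model=512, encoder_layers=2, decoder_layers=2)
print(pegasus_config.hidden_size)  # 512, resolved through the attribute map
print(pegasus_config.num_attention_heads)  # alias for encoder_attention_heads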
def reverse_long_words(sentence: str) -> str:
    return ''' '''.join(
        ''''''.join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
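# Hedged usage sketch: a slow/fast tokenizer pair like this is typically registered with
# AutoTokenizer; CustomConfig is an assumed companion config class, not defined in this file.
# from transformers import AutoTokenizer
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast)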
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
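# Usage sketch mirroring the docstring example above (requires the `mauve-text` package and
# downloads a GPT-2 featurizer on first use; the printed score is illustrative):
# import datasets
# mauve = datasets.load_metric('''mauve''')
# out = mauve.compute(predictions=['''hello there'''], references=['''hello there'''])
# print(out.mauve)  # 1.0 for identical inputs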
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Union[str, Any] = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
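# Hedged illustration of the lazy-import pattern above (the module path is an assumption
# based on the relative imports): importing the package stays cheap, and the torch-backed
# symbols are only materialized on first attribute access.
# import importlib
# pkg = importlib.import_module('''transformers.models.deprecated.trajectory_transformer''')
# config_cls = pkg.TrajectoryTransformerConfig  # resolved lazily at this point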
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print('''Without Numpy''')
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(),
            )
        )
        print('''With Numpy''')
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(),
            )
        )

    benchmark()
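    # Quick sanity check: both implementations agree on a 3-4-5 right triangle.
    assert euclidean_distance([0, 0], [3, 4]) == 5.0
    assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0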
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : int = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a = AlbertTokenizer
_a = AlbertTokenizerFast
_a = True
_a = True
_a = True
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ = AlbertTokenizer(lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , A ) -> Dict:
'''simple docstring'''
__magic_name__ = '''this is a test'''
__magic_name__ = '''this is a test'''
return input_text, output_text
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = '''<pad>'''
__magic_name__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(lowerCAmelCase_ ) , 3_00_00 )
def __A ( self ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = '''I was born in 92000, and this is falsé.'''
__magic_name__ = tokenizer.tokenize(lowerCAmelCase_ )
__magic_name__ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__magic_name__ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__magic_name__ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = tokenizer.encode(lowerCAmelCase_ )
__magic_name__ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = AlbertTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
__magic_name__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [48, 25, 21, 12_89] )
__magic_name__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
__magic_name__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
__magic_name__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = AlbertTokenizer(lowerCAmelCase_ )
__magic_name__ = tokenizer.encode('''sequence builders''' )
__magic_name__ = tokenizer.encode('''multi-sequence build''' )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith('''.model.1.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.bias''', '''.conv1d_1.bias''')
    elif key.endswith('''.model.1.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.weight''', '''.conv1d_1.weight''')
    elif key.endswith('''.model.3.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.bias''', '''.conv1d_2.bias''')
    elif key.endswith('''.model.3.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.weight''', '''.conv1d_2.weight''')

    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''', '''conditioner_blocks''')

    if "prime_prior" in key:
        key = key.replace('''prime_prior''', '''encoder''')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''', '''.''')

    if key.endswith('''k'''):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''', '''.codebook''')
    if "y_emb." in key:
        return key.replace('''y_emb.''', '''metadata_embedding.''')
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''', '''embed_tokens''')
    if "prime_state_ln" in key:
        return key.replace('''prime_state_ln''', '''encoder.final_layer_norm''')
    if ".ln" in key:
        return key.replace('''.ln''', '''.layer_norm''')
    if "_ln" in key:
        return key.replace('''_ln''', '''_layer_norm''')
    if "prime_state_proj" in key:
        return key.replace('''prime_state_proj''', '''encoder.proj_in''')
    if "prime_x_out" in key:
        return key.replace('''prime_x_out''', '''encoder.lm_head''')
    if "prior.x_out" in key:
        return key.replace('''x_out''', '''fc_proj_out''')
    if "x_emb" in key:
        return key.replace('''x_emb''', '''embed_tokens''')

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''')
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}', '''wb''').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('''/''')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}')['''model''']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b'''):
                new_dic[k.replace('''b''', '''bias''')] = old_dic[k]
            elif k.endswith('''.w'''):
                new_dic[k.replace('''w''', '''weight''')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''', '''.model.''')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = '''vqvae''' if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', '''w''') as txtfile:
        json.dump(mapping, txtfile)

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''')
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = '''0094702343221'''
    print(is_sri_lankan_phone_number(phone))
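    # Further illustrative cases (drawn from the regex above: a 0 / 94 / +94 / 0094 prefix,
    # then an 07x mobile code and seven digits):
    assert is_sri_lankan_phone_number('''+94773283048''')
    assert not is_sri_lankan_phone_number('''0912343221''')  # 09x is not a mobile prefix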
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
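# Usage sketch (assumption: this mirrors transformers' TableTransformerConfig and its
# ONNX export config): the attribute map above routes hidden_size to d_model.
from transformers import TableTransformerConfig

table_config = TableTransformerConfig(num_queries=50)
print(table_config.hidden_size)  # 256, the d_model default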
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='''relu''',
    )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
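    # Sanity-check sketch: make_linear_from_emb copies the shared embedding weights into
    # lm_head, so after conversion the two tensors should be identical.
    # assert torch.equal(model.lm_head.weight, model.model.shared.weight)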
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
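    # Equivalent in-process call (paths are placeholders):
    # convert_tf_checkpoint_to_pytorch('''./model.ckpt''', '''./lxmert_config.json''', '''./pytorch_model.bin''')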
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
    doctest.testmod()
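    # Example run over the graphs above: the shortest E -> F route has total weight 3
    # (E -> G -> F), beating E -> B -> C -> D -> F at weight 4.
    print(bidirectional_dij('''E''', '''F''', graph_fwd, graph_bwd))  # 3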
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Dict ):
with open(snake_case_ ) as metadata_file:
__magic_name__ = json.load(snake_case_ )
__magic_name__ = LukeConfig(use_entity_aware_attention=snake_case_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )["module"]
# Load the entity vocab file
__magic_name__ = load_original_entity_vocab(snake_case_ )
# add an entry for [MASK2]
__magic_name__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__magic_name__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__magic_name__ = AddedToken('''<ent>''' , lstrip=snake_case_ , rstrip=snake_case_ )
__magic_name__ = AddedToken('''<ent2>''' , lstrip=snake_case_ , rstrip=snake_case_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
__magic_name__ = "MLukeTokenizer"
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
with open(os.path.join(snake_case_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ )
# Initialize the embeddings of the special tokens
__magic_name__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
__magic_name__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
__magic_name__ = state_dict["embeddings.word_embeddings.weight"]
__magic_name__ = word_emb[ent_init_index].unsqueeze(0 )
__magic_name__ = word_emb[enta_init_index].unsqueeze(0 )
__magic_name__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__magic_name__ = state_dict[bias_name]
__magic_name__ = decoder_bias[ent_init_index].unsqueeze(0 )
__magic_name__ = decoder_bias[enta_init_index].unsqueeze(0 )
__magic_name__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__magic_name__ = f'encoder.layer.{layer_index}.attention.self.'
__magic_name__ = state_dict[prefix + matrix_name]
__magic_name__ = state_dict[prefix + matrix_name]
__magic_name__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__magic_name__ = state_dict["entity_embeddings.entity_embeddings.weight"]
__magic_name__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__magic_name__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__magic_name__ = state_dict["entity_predictions.bias"]
__magic_name__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__magic_name__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
__magic_name__ = LukeForMaskedLM(config=snake_case_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
__magic_name__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
__magic_name__ = state_dict[key]
else:
__magic_name__ = state_dict[key]
__magic_name__ = model.load_state_dict(snake_case_ , strict=snake_case_ )
if set(snake_case_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(snake_case_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ , task='''entity_classification''' )
__magic_name__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__magic_name__ = (0, 9)
__magic_name__ = tokenizer(snake_case_ , entity_spans=[span] , return_tensors='''pt''' )
__magic_name__ = model(**snake_case_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ = torch.Size((1, 33, 768) )
__magic_name__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ = torch.Size((1, 1, 768) )
__magic_name__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__magic_name__ = MLukeTokenizer.from_pretrained(snake_case_ )
__magic_name__ = "Tokyo is the capital of <mask>."
__magic_name__ = (24, 30)
__magic_name__ = tokenizer(snake_case_ , entity_spans=[span] , return_tensors='''pt''' )
__magic_name__ = model(**snake_case_ )
__magic_name__ = encoding["input_ids"][0].tolist()
__magic_name__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
__magic_name__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case_ )
__magic_name__ = outputs.entity_logits[0][0].argmax().item()
__magic_name__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(snake_case_ ) )
model.save_pretrained(snake_case_ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry['''entities''']:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
    )
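    # Hedged post-conversion sketch (the folder path is a placeholder): reload the converted
    # tokenizer and reproduce the masked-entity encoding verified above.
    # tokenizer = MLukeTokenizer.from_pretrained('''./mluke-converted''')
    # encoding = tokenizer('''Tokyo is the capital of <mask>.''', entity_spans=[(24, 30)], return_tensors='''pt''')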
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i, end=''',''')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''')
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
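    # Note: the greedy rule above is only optimal when activities are pre-sorted by finish
    # time, as the sample data is. For unsorted input, sort the pairs first, e.g.:
    # order = sorted(range(len(finish)), key=lambda k: finish[k])
    # start, finish = [start[k] for k in order], [finish[k] for k in order]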
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : str = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE_ ( lowercase__ ):
"""simple docstring"""
_a = """markuplm"""
def __init__( self , A=3_05_22 , A=7_68 , A=12 , A=12 , A=30_72 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=2 , A=0.02 , A=1E-12 , A=0 , A=0 , A=2 , A=2_56 , A=10_24 , A=2_16 , A=10_01 , A=32 , A=50 , A="absolute" , A=True , A=None , **A , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = position_embedding_type
__magic_name__ = use_cache
__magic_name__ = classifier_dropout
# additional properties
__magic_name__ = max_depth
__magic_name__ = max_xpath_tag_unit_embeddings
__magic_name__ = max_xpath_subs_unit_embeddings
__magic_name__ = tag_pad_id
__magic_name__ = subs_pad_id
__magic_name__ = xpath_unit_hidden_size
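# Usage sketch (assumption: this mirrors transformers' MarkupLMConfig): the extra
# XPath-related properties above receive their defaults here.
from transformers import MarkupLMConfig

markuplm_config = MarkupLMConfig()
print(markuplm_config.max_depth, markuplm_config.xpath_unit_hidden_size)  # 50 32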
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
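# scale_features and scale_to_features above are inverse min-max rescalings
# between the model's feature range [log(1e-5), 4.0] and a requested range.
# A dependency-free sketch of the same arithmetic:
import math

_MIN, _MAX = math.log(1e-5), 4.0


def _to_range(x: float, lo: float, hi: float) -> float:
    zero_one = (x - _MIN) / (_MAX - _MIN)
    return zero_one * (hi - lo) + lo


def _from_range(y: float, lo: float, hi: float) -> float:
    zero_one = (y - lo) / (hi - lo)
    return zero_one * (_MAX - _MIN) + _MIN


assert abs(_from_range(_to_range(0.0, -1.0, 1.0), -1.0, 1.0)) < 1e-9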
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
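# Both __init__ modules above follow the same optional-dependency pattern:
# when the guarded imports are unavailable, the real pipelines are swapped
# for dummy objects that raise a helpful error at instantiation time instead
# of at import time.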
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two implicants that differ in exactly one bit; otherwise return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '''_'''
    if count > 1:
        return False
    else:
        return "".join(list1)
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[str] ):
__magic_name__ = []
while True:
__magic_name__ = ['''$'''] * len(lowerCamelCase_ )
__magic_name__ = []
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
__magic_name__ = compare_string(binary[i] , binary[j] )
if k is False:
__magic_name__ = '''*'''
__magic_name__ = '''*'''
temp.append('''X''' )
for i in range(len(lowerCamelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCamelCase_ ) == 0:
return pi
__magic_name__ = list(set(lowerCamelCase_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Sequence[float] ):
__magic_name__ = []
for minterm in minterms:
__magic_name__ = ''''''
for _ in range(lowerCamelCase_ ):
__magic_name__ = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCamelCase_ )
return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the strings differ in exactly `count` (don't-care) positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : list[str] ):
__magic_name__ = []
__magic_name__ = [0] * len(lowerCamelCase_ )
for i in range(len(chart[0] ) ):
__magic_name__ = 0
__magic_name__ = -1
for j in range(len(lowerCamelCase_ ) ):
if chart[j][i] == 1:
count += 1
__magic_name__ = j
if count == 1:
__magic_name__ = 1
for i in range(len(lowerCamelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCamelCase_ ) ):
__magic_name__ = 0
temp.append(prime_implicants[i] )
while True:
__magic_name__ = 0
__magic_name__ = -1
__magic_name__ = 0
for i in range(len(lowerCamelCase_ ) ):
__magic_name__ = chart[i].count(1 )
if count_n > max_n:
__magic_name__ = count_n
__magic_name__ = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCamelCase_ ) ):
__magic_name__ = 0
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[str] , snake_case_ : list[str] ):
__magic_name__ = [[0 for x in range(len(lowerCamelCase_ ) )] for x in range(len(lowerCamelCase_ ) )]
for i in range(len(lowerCamelCase_ ) ):
__magic_name__ = prime_implicants[i].count('''_''' )
for j in range(len(lowerCamelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCamelCase_ ):
__magic_name__ = 1
return chart
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = int(input('''Enter the no. of variables\n''' ) )
__magic_name__ = [
float(lowerCamelCase_ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__magic_name__ = decimal_to_binary(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ = check(lowerCamelCase_ )
print('''Prime Implicants are:''' )
print(lowerCamelCase_ )
__magic_name__ = prime_implicant_chart(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ = selection(lowerCamelCase_ , lowerCamelCase_ )
print('''Essential Prime Implicants are:''' )
print(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 702 |
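# compare_string above implements the Quine-McCluskey merge step: two
# implicants combine only when they differ in exactly one bit, which becomes
# a don't-care position:
assert compare_string("0110", "0100") == "01_0"
assert compare_string("0110", "1100") is False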
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
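# Example invocation of the conversion script above (the script file name and
# local paths are placeholders):
#
#   python convert_swin_simmim_checkpoint.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted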
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ ):
"""simple docstring"""
_a = """lilt"""
def __init__( self , A=3_05_22 , A=7_68 , A=12 , A=12 , A=30_72 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=None , A=4 , A=10_24 , **A , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = position_embedding_type
__magic_name__ = classifier_dropout
__magic_name__ = channel_shrink_ratio
__magic_name__ = max_ad_position_embeddings | 703 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature shared by all anagrams of `word`."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known anagram of the given word."""
    return word_by_signature[signature(my_word)]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
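# The same signature trick in miniature: words sharing a sorted-letter key
# are anagrams of one another.
from collections import defaultdict

_groups = defaultdict(list)
for _w in ("listen", "silent", "enlist", "google"):
    _groups["".join(sorted(_w))].append(_w)
assert _groups["eilnst"] == ["listen", "silent", "enlist"]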
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[Any] ):
__magic_name__ = b.T
__magic_name__ = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=1 )
__magic_name__ = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=0 )
__magic_name__ = np.matmul(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__magic_name__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Optional[Any] ):
__magic_name__ = x.reshape(-1 , 3 )
__magic_name__ = squared_euclidean_distance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return np.argmin(__SCREAMING_SNAKE_CASE , axis=1 )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self , A = None , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = True , **A , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowercase_ )
__magic_name__ = size if size is not None else {"height": 2_56, "width": 2_56}
__magic_name__ = get_size_dict(lowercase_ )
__magic_name__ = np.array(lowercase_ ) if clusters is not None else None
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_normalize
__magic_name__ = do_color_quantize
def __A ( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> Tuple:
'''simple docstring'''
__magic_name__ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
lowercase_ , size=(size['''height'''], size['''width''']) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __A ( self , A , A = None , ) -> Dict:
'''simple docstring'''
__magic_name__ = rescale(image=lowercase_ , scale=1 / 1_27.5 , data_format=lowercase_ )
__magic_name__ = image - 1
return image
def __A ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(lowercase_ )
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__magic_name__ = clusters if clusters is not None else self.clusters
__magic_name__ = np.array(lowercase_ )
__magic_name__ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
__magic_name__ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
__magic_name__ = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
__magic_name__ = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__magic_name__ = np.array(lowercase_ )
__magic_name__ = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__magic_name__ = images.shape[0]
__magic_name__ = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__magic_name__ = list(lowercase_ )
else:
__magic_name__ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
__magic_name__ = {"input_ids": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) | 704 |
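# color_quantize maps each RGB pixel to the index of its nearest palette
# colour (argmin of squared euclidean distance).  A tiny self-contained
# sketch with a made-up two-colour palette:
import numpy as np

_palette = np.array([[0, 0, 0], [255, 255, 255]])       # black / white
_pixels = np.array([[10, 10, 10], [250, 240, 245]])
_dists = ((_pixels[:, None, :] - _palette[None, :, :]) ** 2).sum(axis=-1)
assert list(_dists.argmin(axis=1)) == [0, 1]            # dark -> 0, light -> 1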
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
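# A degree-1 Bézier curve is a straight line, so it must pass through its two
# control points at t=0 and t=1.  Assuming the class above is exposed under
# its intended name, BezierCurve:
#
#     curve = BezierCurve([(1.0, 1.0), (3.0, 5.0)])
#     assert curve.bezier_curve_function(0.0) == (1.0, 1.0)
#     assert curve.bezier_curve_function(1.0) == (3.0, 5.0)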
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a_ : List[Any] = "facebook/wmt19-en-de"
a_ : int = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a_ : int = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
a_ : Optional[int] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
a_ : Union[str, Any] = tokenizer(['Making tiny model'], return_tensors='pt')
a_ : Optional[int] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
a_ : str = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de | 705 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
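# Quick checks against the pattern above:
assert is_sri_lankan_phone_number("0094702343221")
assert is_sri_lankan_phone_number("+94712345678")
assert not is_sri_lankan_phone_number("0093712345678")  # wrong country code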
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=4 , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_attention_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_choices
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_attention_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=A,
            initializer_range=self.initializer_range,
        )
return config, input_ids, token_type_ids, attention_mask
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ = config_and_inputs
__magic_name__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
_a = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = FlaxAlbertModelTester(self )
@slow
def __A ( self ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__magic_name__ = model_class_name.from_pretrained('''albert-base-v2''' )
__magic_name__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
__magic_name__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__magic_name__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__magic_name__ = model(A , attention_mask=A )[0]
__magic_name__ = (1, 11, 7_68)
self.assertEqual(output.shape , A )
__magic_name__ = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
| 706 |
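# The @slow-marked tests above are skipped by default; transformers runs them
# only when the RUN_SLOW environment variable is set, e.g.:
#
#   RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_flax_albert.py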
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |
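# As the assertions above illustrate, find_backend maps a guarded-import line
# to a backend key ("torch", "torch_and_transformers", ...), and
# create_dummy_files emits one placeholder per key so that missing optional
# dependencies fail with a clear error at use time rather than import time.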
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : List[str] = 16
a_ : Any = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Tuple ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**_SCREAMING_SNAKE_CASE )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_SCREAMING_SNAKE_CASE ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Tuple ):
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
__magic_name__ , __magic_name__ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
__magic_name__ = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(_SCREAMING_SNAKE_CASE ) + 1
__magic_name__ = evaluation_loop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.print('''resumed checkpoint performance:''' , _SCREAMING_SNAKE_CASE )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(_SCREAMING_SNAKE_CASE )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__magic_name__ = model(**_SCREAMING_SNAKE_CASE )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
__magic_name__ = evaluation_loop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_SCREAMING_SNAKE_CASE , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
'''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main() | 707 |
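# A typical way to exercise the checkpointing script above (script name and
# paths are placeholders); state folders are written as epoch_0, epoch_1, ...
# under --output_dir:
#
#   accelerate launch checkpointing.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0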
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 0 |
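# A self-contained sketch of the same backtracking path count: on a 2x2 open
# grid there are exactly two simple paths from the top-left to the
# bottom-right cell.
def _count_paths(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    rows, cols = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == rows
        or col == cols
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))
    total = sum(
        _count_paths(grid, row + dr, col + dc, visit)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    visit.remove((row, col))
    return total


assert _count_paths([[0, 0], [0, 0]], 0, 0, set()) == 2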
from __future__ import annotations
a_ : Union[str, Any] = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """LSD radix sort for non-negative integers, in place on `list_of_ints`."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
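# Quick check of the LSD radix sort above (non-negative integers only, since
# bucketing is by decimal digit):
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]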
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 0 |
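# The conversion above scales by 10 ** (from_exponent - to_exponent); for
# example, kilometre (exponent 3) to metre (exponent 0) multiplies by 10**3,
# so converting 4 km yields 4000.0 m.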
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = 42
_a = None
_a = None
def _SCREAMING_SNAKE_CASE ( snake_case_ : TreeNode | None ):
def is_valid_tree(snake_case_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowerCAmelCase__ ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
snake_case_ : TreeNode | None , snake_case_ : float , snake_case_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , lowerCAmelCase__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , lowerCAmelCase__ )
)
return is_binary_search_tree_recursive_check(lowerCAmelCase__ , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 709 |
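# The recursive check above threads an exclusive (left_bound, right_bound)
# window down the tree, starting from (-inf, +inf); the strict comparison
# left_bound < node.data < right_bound rejects duplicates as well as
# misplaced nodes.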
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 0 |
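# The _LazyModule above defers the heavy torch/flax imports until one of the
# listed names is first accessed, keeping `import transformers` cheap; the
# TYPE_CHECKING branch gives static type checkers the real imports instead.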
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ : Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(default=a__ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_a = field(
default=a__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_a = field(
default=a__ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
_a = field(
default=a__ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
_a = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
_a = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
_a = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
_a = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
_a = field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_a = field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
_a = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
_a = field(
default=a__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_a = field(
default=a__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
_a = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_a = field(
default=a__ , metadata={"""help""": """Name or path of preprocessor config."""} )
_a = field(
default=a__ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
_a = field(
default=a__ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
_a = field(
default=a__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_a = field(
default=a__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
_a = field(
default=a__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , lowerCamelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__magic_name__ = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__magic_name__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
__magic_name__ = DatasetDict()
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__magic_name__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__magic_name__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__magic_name__ = feature_extractor.model_input_names[0]
def train_transforms(snake_case_ : int ):
__magic_name__ = []
for audio in batch[data_args.audio_column_name]:
__magic_name__ = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case_ )
__magic_name__ = feature_extractor(snake_case_ , sampling_rate=feature_extractor.sampling_rate )
__magic_name__ = {model_input_name: inputs.get(snake_case_ )}
__magic_name__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case_ : int ):
__magic_name__ = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
__magic_name__ = feature_extractor(snake_case_ , sampling_rate=feature_extractor.sampling_rate )
__magic_name__ = {model_input_name: inputs.get(snake_case_ )}
__magic_name__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__magic_name__ = raw_datasets['''train'''].features[data_args.label_column_name].names
__magic_name__ , __magic_name__ = {}, {}
for i, label in enumerate(snake_case_ ):
__magic_name__ = str(snake_case_ )
__magic_name__ = label
# Load the accuracy metric from the datasets package
__magic_name__ = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case_ : int ):
__magic_name__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case_ , references=eval_pred.label_ids )
__magic_name__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case_ ) , labelaid=snake_case_ , idalabel=snake_case_ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__magic_name__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__magic_name__ = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case_ , output_all_columns=snake_case_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__magic_name__ = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case_ , output_all_columns=snake_case_ )
# Initialize our trainer
__magic_name__ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
# Training
if training_args.do_train:
__magic_name__ = None
if training_args.resume_from_checkpoint is not None:
__magic_name__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__magic_name__ = last_checkpoint
__magic_name__ = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__magic_name__ = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case_ )
trainer.save_metrics('''eval''' , snake_case_ )
# Write model card and (optionally) push to hub
__magic_name__ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main() | 710 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
import operator as op
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = []
    __magic_name__ = lambda x , y : int(x / y ) # noqa: E731 integer division operation
__magic_name__ = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(snake_case_ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(snake_case_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
else:
__magic_name__ = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
__magic_name__ = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(snake_case_ ) , int(snake_case_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
a_ : Union[str, Any] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 711 |
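# Quick sanity check of the evaluator above (assuming the public entry point is
# `solve`, as the __main__ block suggests):
#
#     solve("5 6 9 * +".split(" "))  ->  59
#
# The stack evolves [5] -> [5, 6] -> [5, 6, 9] -> [5, 54] -> [59]: "*" pops
# 9 and 6 and pushes 54, then "+" pops 54 and 5 and pushes 59.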
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 0 |
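# Worked check (Project Euler 40): the digits of Champernowne's constant at
# positions 1, 10, 100, 1_000, 10_000, 100_000 and 1_000_000 are
# 1, 1, 5, 3, 7, 2 and 1, so solution() returns 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.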
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ : str = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple ):
__magic_name__ = b.T
__magic_name__ = np.sum(np.square(__lowerCAmelCase ) , axis=1 )
__magic_name__ = np.sum(np.square(__lowerCAmelCase ) , axis=0 )
__magic_name__ = np.matmul(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int ):
__magic_name__ = x.reshape(-1 , 3 )
__magic_name__ = squared_euclidean_distance(__lowerCAmelCase , __lowerCAmelCase )
return np.argmin(__lowerCAmelCase , axis=1 )
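# Minimal sketch of what the two helpers above compute (assuming the second
# helper is named `color_quantize`, as its call site further below suggests):
# every pixel is mapped to the index of its nearest palette colour under
# squared Euclidean distance.
#
#     pixels = np.random.rand(4, 4, 3)        # (height, width, RGB)
#     clusters = np.random.rand(16, 3)        # 16 candidate palette colours
#     ids = color_quantize(pixels, clusters)  # flat array of 4 * 4 cluster ids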
class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self , A = None , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = True , **A , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
__magic_name__ = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
__magic_name__ = get_size_dict(lowerCAmelCase_ )
__magic_name__ = np.array(lowerCAmelCase_ ) if clusters is not None else None
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_normalize
__magic_name__ = do_color_quantize
def __A ( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> Tuple:
'''simple docstring'''
__magic_name__ = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __A ( self , A , A = None , ) -> Tuple:
'''simple docstring'''
__magic_name__ = rescale(image=lowerCAmelCase_ , scale=1 / 1_27.5 , data_format=lowerCAmelCase_ )
__magic_name__ = image - 1
return image
def __A ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(lowerCAmelCase_ )
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__magic_name__ = clusters if clusters is not None else self.clusters
__magic_name__ = np.array(lowerCAmelCase_ )
__magic_name__ = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__magic_name__ = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_normalize:
__magic_name__ = [self.normalize(image=lowerCAmelCase_ ) for image in images]
if do_color_quantize:
__magic_name__ = [to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__magic_name__ = np.array(lowerCAmelCase_ )
__magic_name__ = color_quantize(lowerCAmelCase_ , lowerCAmelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__magic_name__ = images.shape[0]
__magic_name__ = images.reshape(lowerCAmelCase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__magic_name__ = list(lowerCAmelCase_ )
else:
__magic_name__ = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__magic_name__ = {'''input_ids''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) | 712 |
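# Hypothetical end-to-end use of the processor above (the class and argument
# names here are assumptions in this obfuscated listing; the API mirrors
# ImageGPT-style processors):
#
#     processor = ImageProcessor(clusters=palette)   # palette: (n_clusters, 3)
#     batch = processor(images, return_tensors="np")
#     batch["input_ids"].shape  ->  (batch_size, height * width)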
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
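# The two URL layouts produced above, with illustrative values:
#   legacy (no "/" in model_id):  {endpoint}/bert-base-cased-pytorch_model.bin
#   namespaced model_id:          {endpoint}/username/model-name/pytorch_model.bin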
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
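# Sketch of the cache-filename scheme above (hash values elided): the name is
#     sha256(url).hexdigest() + "." + sha256(etag).hexdigest() [+ ".h5"]
# so re-downloads of the same URL under a new ETag never collide in the cache.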
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
    print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
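# Minimal sketch of the batching generator above (assuming the original
# signature was `chunk(images, batch=1)`, which the body suggests):
#
#     list(chunk([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]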
from math import pow
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int , ):
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__magic_name__ = int(pow(a_ , a_ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__magic_name__ = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__magic_name__ = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
return current_sum, solutions_count
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(a_ , a_ , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod() | 713 |
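# Worked example for the backtracking solver above (assuming the public entry
# point is `solve(needed_sum, power)`): solve(13, 2) == 1, because the only way
# to write 13 as a sum of distinct squares of natural numbers is 2**2 + 3**2.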
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of a training script that saves, resumes and verifies full training state (checkpointing).''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 0 |
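# A typical launch sequence for the checkpointing script above (all flags and
# paths are illustrative):
#
#     accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts
#     accelerate launch this_script.py --output_dir ./ckpts \
#         --resume_from_checkpoint ./ckpts/epoch_1
#
# The second run reloads the saved state and asserts that the restored
# accuracy, learning rates and epoch counter match the recorded state_*.json.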
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__magic_name__ = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(_A ) , torch_builtin(_A ) ) )
self.assertFalse(torch.allclose(gelu_python(_A ) , gelu_new(_A ) ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__magic_name__ = get_activation('''gelu''' )
__magic_name__ = get_activation('''gelu_10''' )
__magic_name__ = torch_builtin(_A )
__magic_name__ = geluaa(_A )
__magic_name__ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_A ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(_A ):
get_activation('''bogus''' )
with self.assertRaises(_A ):
get_activation(_A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = get_activation('''gelu''' )
__magic_name__ = 1
__magic_name__ = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_A ):
__magic_name__ = acta.a | 714 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
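# Example of the transform above: every word longer than four characters is
# reversed in place, so 'Hey wollef sroirraw' becomes 'Hey fellow warriors'.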
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(UpperCAmelCase__ ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
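# Illustrative behaviour of the helper above (assuming the usual supported
# formats of "json", "csv" and "pipe"):
#     try_infer_format_from_ext("data.csv")  ->  "csv"
#     try_infer_format_from_ext("")          ->  "pipe"
#     try_infer_format_from_ext("data.xyz")  ->  raises Exception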
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__magic_name__ = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
__magic_name__ = PipelineDataFormat.from_str(
format=UpperCAmelCase__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(UpperCAmelCase__ , UpperCAmelCase__ )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = nlp
__magic_name__ = reader
@staticmethod
def __A ( A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=__UpperCamelCase , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=__UpperCamelCase , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=__UpperCamelCase , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=__UpperCamelCase , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=__UpperCamelCase , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=__UpperCamelCase , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=__UpperCamelCase , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=__UpperCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=__UpperCamelCase )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ , __magic_name__ = self._nlp, []
for entry in self._reader:
__magic_name__ = nlp(**__UpperCamelCase ) if self._reader.is_multi_columns else nlp(__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
outputs.append(__UpperCamelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__magic_name__ = self._reader.save_binary(__UpperCamelCase )
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(__UpperCamelCase )
| 715 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
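        # Thin wrapper: every argument is forwarded unchanged to the reference
        # implementation's compute_mauve().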
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ : Union[str, Any] = logging.get_logger(__name__)
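# Image processor pipeline: optional RGB conversion, resize (bicubic, 384x384 by
# default), rescale by 1/255, and normalization with the OpenAI CLIP mean/std.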
class SCREAMING_SNAKE_CASE_ ( BaseImageProcessor ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = 1 / 2_55 , A = True , A = None , A = None , A = True , **A , ) -> Dict:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = do_normalize
__magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD
__magic_name__ = do_convert_rgb
def __A ( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = get_size_dict(A , default_to_square=A )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
__magic_name__ = (size['''height'''], size['''width'''])
return resize(A , size=A , resample=A , data_format=A , **A )
def __A ( self , A , A , A = None , **A , ) -> str:
'''simple docstring'''
return rescale(A , scale=A , data_format=A , **A )
def __A ( self , A , A , A , A = None , **A , ) -> Dict:
'''simple docstring'''
return normalize(A , mean=A , std=A , data_format=A , **A )
def __A ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ = image_mean if image_mean is not None else self.image_mean
__magic_name__ = image_std if image_std is not None else self.image_std
__magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if (do_resize and size is None) or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__magic_name__ = [convert_to_rgb(A ) for image in images]
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(A ) for image in images]
if do_resize:
__magic_name__ = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_rescale:
__magic_name__ = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
__magic_name__ = [self.normalize(image=A , mean=A , std=A ) for image in images]
__magic_name__ = [to_channel_dimension_format(A , A ) for image in images]
__magic_name__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=A )
return encoded_outputs | 716 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
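# Two equivalent distance implementations below: a vectorized NumPy version and
# a pure-Python one, benchmarked against each other under __main__.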
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark():
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 0 |
from __future__ import annotations
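# "House robber" dynamic program: maximum sum of non-adjacent elements, tracked
# with two running totals, e.g. maximum_non_adjacent_sum([1, 2, 3]) == 4.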
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
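# Each released model is assembled from a shared VQ-VAE checkpoint plus three
# prior levels; the files are downloaded from PREFIX at conversion time.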
def replace_key(key):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
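    # Two renaming passes follow: the regexes above rewrite conv/resnet block
    # keys, then replace_key() handles the remaining string substitutions.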
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
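# Conversion entry point: download the raw checkpoints, remap their keys onto
# the Hugging Face JukeboxModel layout, then save the model and the key mapping.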
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
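# TextStreamer prints decoded tokens to stdout while generate() runs;
# TextIteratorStreamer exposes the same stream through a blocking iterator fed
# from a background thread, with an optional timeout.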
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase__ )
__magic_name__ = -1
__magic_name__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase__ )
__magic_name__ = model.generate(lowerCamelCase__ , max_new_tokens=10 , do_sample=lowerCamelCase__ )
__magic_name__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__magic_name__ = TextStreamer(lowerCamelCase__ )
model.generate(lowerCamelCase__ , max_new_tokens=10 , do_sample=lowerCamelCase__ , streamer=lowerCamelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__magic_name__ = cs.out[:-1]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase__ )
__magic_name__ = -1
__magic_name__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase__ )
__magic_name__ = model.generate(lowerCamelCase__ , max_new_tokens=10 , do_sample=lowerCamelCase__ )
__magic_name__ = tokenizer.decode(greedy_ids[0] )
__magic_name__ = TextIteratorStreamer(lowerCamelCase__ )
__magic_name__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__magic_name__ = Thread(target=model.generate , kwargs=lowerCamelCase__ )
thread.start()
__magic_name__ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase__ )
__magic_name__ = -1
__magic_name__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase__ )
__magic_name__ = model.generate(lowerCamelCase__ , max_new_tokens=10 , do_sample=lowerCamelCase__ )
__magic_name__ = greedy_ids[:, input_ids.shape[1] :]
__magic_name__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__magic_name__ = TextStreamer(lowerCamelCase__ , skip_prompt=lowerCamelCase__ )
model.generate(lowerCamelCase__ , max_new_tokens=10 , do_sample=lowerCamelCase__ , streamer=lowerCamelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__magic_name__ = cs.out[:-1]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''distilgpt2''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowerCamelCase__ )
__magic_name__ = -1
__magic_name__ = torch.ones((1, 5) , device=lowerCamelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__magic_name__ = TextStreamer(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
model.generate(lowerCamelCase__ , max_new_tokens=1 , do_sample=lowerCamelCase__ , streamer=lowerCamelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__magic_name__ = cs.out[:-1] # Remove the final "\n"
__magic_name__ = tokenizer(lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase__ )
__magic_name__ = -1
__magic_name__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase__ )
__magic_name__ = TextIteratorStreamer(lowerCamelCase__ , timeout=0.0_01 )
__magic_name__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__magic_name__ = Thread(target=model.generate , kwargs=lowerCamelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
__magic_name__ = ''''''
for new_text in streamer:
streamer_text += new_text | 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
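# The configuration mirrors DETR: a convolutional backbone (timm or HF) feeding
# a transformer encoder-decoder, plus Hungarian-matcher costs and loss weights.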
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( OnnxConfig ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a_ : Tuple = logging.get_logger(__name__)
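# Name-level mappings (model type -> Flax class name) are declared first; the
# _LazyAutoMapping wrappers further down resolve them to classes on first use.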
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
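# CLI that builds LxmertForPreTraining from a JSON config, loads the TensorFlow
# checkpoint weights into it, and saves the resulting PyTorch state dict.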
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
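# Standard HF test layout: a model tester builds tiny configs and inputs, the
# common-test mixins drive the shared checks, and slow tests hit real weights.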
class DistilBertModelTester:
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Any:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Any:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __A ( self , A , A , A , A , A , A ) -> int:
'''simple docstring'''
__magic_name__ = DistilBertModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , A , A ) -> Dict:
'''simple docstring'''
__magic_name__ = DistilBertForMaskedLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = DistilBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = DistilBertForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = DistilBertForTokenClassification(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = DistilBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
A , attention_mask=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__)) = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
_a = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_a = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
_a = True
_a = True
_a = True
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = DistilBertModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def __A ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = DistilBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
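        # Trace each model with TorchScript, save and reload it from disk, then
        # run the loaded module on the GPU to catch device-placement issues.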
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__magic_name__ = True
__magic_name__ = model_class(config=A )
__magic_name__ = self._prepare_for_class(A , A )
__magic_name__ = torch.jit.trace(
A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A , os.path.join(A , '''traced_model.pt''' ) )
__magic_name__ = torch.jit.load(os.path.join(A , '''traced_model.pt''' ) , map_location=A )
loaded(inputs_dict['''input_ids'''].to(A ) , inputs_dict['''attention_mask'''].to(A ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__magic_name__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__magic_name__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ = model(A , attention_mask=A )[0]
__magic_name__ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , A )
__magic_name__ = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) ) | 720 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
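# Generates README model cards for the three allenai WMT16 en-de FSMT ports by
# filling one shared template with per-model BLEU scores.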
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f'Generating {path}' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 | 0 |
from __future__ import annotations
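# Recursive binary search over a sorted list: each call halves the slice until
# the item is found or the slice is empty. Note the slices copy their halves,
# so an index-based variant would avoid O(n) copying per level.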
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
print(F"""{target} was {not_str}found in {sequence}""") | 721 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i, end=''',''')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
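    # Greedy demo: with these start/finish times the selected activity indices
    # are 0, 1, 3, 4.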
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 678 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
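# TF generation tests: logits filtering, the shared framework-agnostic
# generation checks, XLA SavedModel export round-trips, and an end-to-end
# tensorflow_text pipeline.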
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
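        # Each (1, 30) row below is constructed so that, after top_k=10
        # pre-filtering, nucleus filtering with top_p=0.6 keeps exactly the five
        # logits flagged in the inline comments.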
__magic_name__ = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        __magic_name__ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non-filtered idx as noted above
        __magic_name__ = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.float32 , ) # expected non-filtered values as noted above
__magic_name__ = tf_top_k_top_p_filtering(A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__magic_name__ = output[output != -float('''inf''' )]
        __magic_name__ = tf.cast(
            tf.where(tf.not_equal(A , tf.constant(-float('''inf''' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
tf.debugging.assert_near(A , A , rtol=1E-12 )
tf.debugging.assert_equal(A , A )
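        # Illustrative sketch (not part of the original test): because suppressed
        # entries are set to -inf, the filtered logits can be sampled directly:
        #   filtered = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6)
        #   next_tokens = tf.random.categorical(filtered, num_samples=1)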
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase , GenerationIntegrationTestsMixin ):
"""simple docstring"""
if is_tf_available():
_a = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def __A ( self ) -> int:
'''simple docstring'''
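        # Wrap generate() in an XLA-compiled tf.function with a fixed sequence
        # length, export it as a SavedModel, reload the serving signature, and
        # check it matches eager generate() for every batch size.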
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 2
__magic_name__ = 2
        class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self , A ) -> Any:
'''simple docstring'''
super(A , self ).__init__()
__magic_name__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def __A ( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
__magic_name__ = [[2, 0], [1_02, 1_03]]
__magic_name__ = [[1, 0], [1, 1]]
__magic_name__ = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
__magic_name__ = tf.saved_model.load(A ).signatures['''serving_default''']
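            # The exported SavedModel signature must reproduce eager generation for every batch size.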
for batch_size in range(1 , len(A ) + 1 ):
__magic_name__ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
__magic_name__ = serving_func(**A )['''sequences''']
__magic_name__ = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 1
__magic_name__ = 2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
"""simple docstring"""
def __init__( self , A ) -> str:
'''simple docstring'''
super(A , self ).__init__()
__magic_name__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def __A ( self , A , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
__magic_name__ = [[2], [1_02, 1_03]]
__magic_name__ = [[1], [1, 1]]
__magic_name__ = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
__magic_name__ = tf.saved_model.load(A ).signatures['''serving_default''']
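            # Batch size is pinned in the signature here, so each row is fed on its own to vary the sequence length.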
for input_row in range(len(A ) ):
__magic_name__ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
__magic_name__ = serving_func(**A )['''sequences''']
__magic_name__ = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
@require_tensorflow_text
def __A ( self ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=A )
class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__magic_name__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(A , '''spiece.model''' ) , '''rb''' ).read() )
__magic_name__ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def __A ( self , A , *A , **A ) -> Dict:
'''simple docstring'''
__magic_name__ = self.tokenizer.tokenize(A )
__magic_name__ , __magic_name__ = text.pad_model_inputs(
A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__magic_name__ = self.model.generate(input_ids=A , attention_mask=A )
return self.tokenizer.detokenize(A )
__magic_name__ = CompleteSentenceTransformer()
__magic_name__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
__magic_name__ = complete_model(A )
__magic_name__ = tf.keras.Model(A , A )
keras_model.save(A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
__magic_name__ = 14
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = '''Hello, my dog is cute and'''
__magic_name__ = tokenizer(A , return_tensors='''tf''' )
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__magic_name__ = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__magic_name__ = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__magic_name__ = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = '''Hugging Face is a technology company based in New York and Paris.'''
__magic_name__ = bart_tokenizer(A , return_tensors='''tf''' ).input_ids
__magic_name__ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = bart_model.generate(A ).numpy()
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self , A , A=None , **A ) -> Optional[int]:
'''simple docstring'''
return super().call(A , **A )
__magic_name__ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = bart_model.generate(A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(A , A ) )
class SCREAMING_SNAKE_CASE_ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def __A ( self , A , **A ) -> Any:
'''simple docstring'''
return super().call(A , **A )
__magic_name__ = FakeEncoder(bart_model.config , bart_model.model.shared )
__magic_name__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__magic_name__ = bart_model.generate(A ).numpy()
with self.assertRaises(A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(A , foo='''bar''' )
| 700 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
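        # Feature dimension of each spectrogram frame.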
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
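        # Non-padding positions (token id > 0) serve as the attention mask for both encoders.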
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
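        # All-ones mask marks the previous prediction as full context for every chunk after the first.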
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
                '''Cannot return output in \'numpy\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
                '''Cannot return output in \'numpy\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = 3
__magic_name__ = (32, 32)
__magic_name__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.dummy_cond_unet_upscale
__magic_name__ = DDPMScheduler()
__magic_name__ = DDIMScheduler(prediction_type='''v_prediction''' )
__magic_name__ = self.dummy_vae
__magic_name__ = self.dummy_text_encoder
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__magic_name__ = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_50 , )
__magic_name__ = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.Generator(device=A ).manual_seed(0 )
__magic_name__ = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = output.images
__magic_name__ = torch.Generator(device=A ).manual_seed(0 )
__magic_name__ = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=A , )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
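        # The x4 upscaler quadruples the spatial size of the low-resolution input.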
__magic_name__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__magic_name__ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.dummy_cond_unet_upscale
__magic_name__ = DDPMScheduler()
__magic_name__ = DDIMScheduler(prediction_type='''v_prediction''' )
__magic_name__ = self.dummy_vae
__magic_name__ = self.dummy_text_encoder
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__magic_name__ = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_50 , )
__magic_name__ = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = output.images
assert image.shape[0] == 2
__magic_name__ = torch.Generator(device=A ).manual_seed(0 )
__magic_name__ = sd_pipe(
[prompt] , image=A , generator=A , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.dummy_cond_unet_upscale
__magic_name__ = DDPMScheduler()
__magic_name__ = DDIMScheduler(prediction_type='''v_prediction''' )
__magic_name__ = self.dummy_vae
__magic_name__ = self.dummy_text_encoder
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__magic_name__ = unet.half()
__magic_name__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
__magic_name__ = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_50 , )
__magic_name__ = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = sd_pipe(
[prompt] , image=A , generator=A , num_inference_steps=2 , output_type='''np''' , ).images
__magic_name__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-x4-upscaler'''
__magic_name__ = StableDiffusionUpscalePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
__magic_name__ = '''a cat sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , image=A , generator=A , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-x4-upscaler'''
__magic_name__ = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
__magic_name__ = '''a cat sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , image=A , generator=A , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__magic_name__ = '''stabilityai/stable-diffusion-x4-upscaler'''
__magic_name__ = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
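        # Sequential CPU offload plus attention slicing trades speed for a much smaller peak GPU memory footprint.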
__magic_name__ = '''a cat sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , image=A , generator=A , num_inference_steps=5 , output_type='''np''' , )
__magic_name__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 702 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
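            # The fused qkv tensor is split into equal query, key and value slices of size dim.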
if "weight" in key:
__magic_name__ = val[:dim, :]
                __magic_name__ = val[dim : dim * 2, :]
__magic_name__ = val[-dim:, :]
else:
                __magic_name__ = val[:dim]
                __magic_name__ = val[dim : dim * 2]
                __magic_name__ = val[-dim:]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
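    # Run a forward pass to sanity-check the converted weights before saving.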
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Dict = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 703 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
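# Group words by their sorted-letter signature; all anagrams of a word share its signature.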
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__magic_name__ = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def __A ( self , **A ) -> List[Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> Any:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> Any:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__magic_name__ = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = self.get_image_processor()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=A )
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ = self.get_image_processor(do_normalize=A , padding_value=1.0 )
__magic_name__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = image_processor(A , return_tensors='''np''' )
__magic_name__ = processor(images=A , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
__magic_name__ = '''lower newer'''
__magic_name__ = processor(text=A )
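        # The direct tokenizer call mirrors the processor's fixed max_length padding of 64 tokens.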
__magic_name__ = tokenizer(A , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.batch_decode(A )
__magic_name__ = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=A , image_processor=A )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 704 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
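            # Each term is the Bernstein basis polynomial comb(n, i) * (1 - t)**(n - i) * t**i.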
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
'''simple docstring'''
a_ : List[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str ):
    # Return True if the sink t is reachable from the source s in the residual graph (BFS).
__magic_name__ = [False] * len(snake_case_ )
__magic_name__ = [s]
__magic_name__ = True
while queue:
__magic_name__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case_ )
__magic_name__ = True
__magic_name__ = u
return visited[t]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : str ):
__magic_name__ = [-1] * (len(snake_case_ ))
__magic_name__ = 0
__magic_name__ = []
__magic_name__ = [i[:] for i in graph] # Record original cut, copy.
while bfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
__magic_name__ = float('''Inf''' )
__magic_name__ = sink
while s != source:
# Find the minimum value in select path
__magic_name__ = min(snake_case_ , graph[parent[s]][s] )
__magic_name__ = parent[s]
max_flow += path_flow
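        # Walk back along the augmenting path, updating residual capacities in both directions.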
__magic_name__ = sink
while v != source:
__magic_name__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__magic_name__ = parent[v]
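    # Edges driven to zero capacity that were positive in the original graph form the minimum cut.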
for i in range(len(snake_case_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 705 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
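    # Accepts 0 / 94 / +94 / 0094 prefixes, a 7x mobile code (x not in {3, 9}), an optional "-" or space, then 7 digits.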
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a_ : Tuple = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
a_ : str = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
a_ : Union[str, Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
a_ : Optional[int] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
a_ : Any = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
for tf_name, hf_name in patterns:
__magic_name__ = k.replace(snake_case_ , snake_case_ )
return k
def _SCREAMING_SNAKE_CASE ( snake_case_ : dict , snake_case_ : dict ):
__magic_name__ = BigBirdPegasusConfig(**snake_case_ )
__magic_name__ = BigBirdPegasusForConditionalGeneration(snake_case_ )
__magic_name__ = torch_model.state_dict()
__magic_name__ = {}
# separating decoder weights
__magic_name__ = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
__magic_name__ = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
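    # Decoder and non-decoder tensors follow different rename patterns, so they are converted separately.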
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
__magic_name__ = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
__magic_name__ = DECODER_PATTERNS
__magic_name__ = rename_state_dict_key(snake_case_ , snake_case_ )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__magic_name__ = v.T
__magic_name__ = torch.from_numpy(snake_case_ )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
__magic_name__ = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
__magic_name__ = REMAINING_PATTERNS
__magic_name__ = rename_state_dict_key(snake_case_ , snake_case_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__magic_name__ = v.T
__magic_name__ = torch.from_numpy(snake_case_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__magic_name__ = mapping['''model.embed_positions.weight''']
__magic_name__ = mapping.pop('''model.embed_positions.weight''' )
__magic_name__ , __magic_name__ = torch_model.load_state_dict(snake_case_ , strict=snake_case_ )
__magic_name__ = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
__magic_name__ = tf.train.list_variables(snake_case_ )
__magic_name__ = {}
__magic_name__ = ['''global_step''']
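    # global_step is optimizer bookkeeping, not a model weight, so it is skipped below.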
for name, shape in tqdm(snake_case_ , desc='''converting tf checkpoint to dict''' ):
__magic_name__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
__magic_name__ = tf.train.load_variable(snake_case_ , snake_case_ )
__magic_name__ = array
return tf_weights
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : dict ):
__magic_name__ = get_tf_weights_as_numpy(snake_case_ )
__magic_name__ = convert_bigbird_pegasus(snake_case_ , snake_case_ )
torch_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
a_ : Dict = parser.parse_args()
a_ : int = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 706 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
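        # find_backend extracts the backend name from an availability-guard line.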
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |
import math
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
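    # "Perfect" here means log2(sqrt(4n + 1)/2 + 1/2) is an exact integer.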
__magic_name__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : float = 1 / 1_2345 ):
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 3
while True:
__magic_name__ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(snake_case_ ):
__magic_name__ = int(snake_case_ )
total_partitions += 1
if check_partition_perfect(snake_case_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(snake_case_ )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""") | 707 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
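    # Mark the current cell as visited along this path.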
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
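    # Backtrack: free the cell so other paths may pass through it.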
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 0 |
from collections import defaultdict
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__magic_name__ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(A ) )
]
__magic_name__ = defaultdict(A ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__magic_name__ = (1 << len(A )) - 1
def __A ( self , A , A ) -> Union[str, Any]:
'''simple docstring'''
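        # Every person has been assigned a task once all bits in the mask are set.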
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
__magic_name__ = self.count_ways_until(A , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__magic_name__ = total_ways_util
return self.dp[mask][task_no]
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
for i in range(len(A ) ):
for j in task_performed[i]:
self.task[j].append(A )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
a_ : int = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a_ : Tuple = [[1, 3, 4], [1, 2, 5], [3, 4]]
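    # Brute-force cross-check (illustrative sketch, self-contained): count the
    # injective person -> task assignments for the instance above directly.
    from itertools import product

    _demo_tasks = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(sum(1 for c in product(*_demo_tasks) if len(set(c)) == len(_demo_tasks)))  # 10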
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 708 |
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
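    # Worked example (illustrative): converting 4 kilometers to meters uses
    # exponents 3 and 0 from the table above, i.e. 4 * 10 ** (3 - 0) = 4000.0.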
testmod() | 678 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Union[str, Any] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
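    # Illustrative note: _LazyModule resolves the names declared in the import
    # structure above only on first attribute access, so importing e.g.
    # ConvBertConfig does not eagerly pull in torch or TensorFlow.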
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 709 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ : Any = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Whether tp freeze the encoder."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
_a = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
_a = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_a = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_a = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
_a = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_a = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
_a = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
_a = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Source language id for translation."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Target language id for translation."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """# num_beams to use for evaluation."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[Any] ):
logger.info(f'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(f' {key} = {metrics[key]}' )
save_json(snake_case_ , os.path.join(snake_case_ , f'{split}_results.json' ) )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__magic_name__ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__magic_name__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__magic_name__ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__magic_name__ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__magic_name__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__magic_name__ = SeqaSeqDataset
# Get datasets
__magic_name__ = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__magic_name__ = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__magic_name__ = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__magic_name__ = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__magic_name__ = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__magic_name__ = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__magic_name__ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__magic_name__ = train_result.metrics
__magic_name__ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__magic_name__ = trainer.evaluate(metric_key_prefix='''val''' )
__magic_name__ = data_args.n_val
__magic_name__ = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__magic_name__ = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__magic_name__ = test_output.metrics
__magic_name__ = data_args.n_test
if trainer.is_world_process_zero():
__magic_name__ = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__magic_name__ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__magic_name__ = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
# For xla_spawn (TPUs)
main()
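# Illustrative invocation (paths and flags below are assumed, not taken from
# this script's docs):
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/distilbart-xsum-12-6 \
#     --data_dir ./xsum --output_dir ./out \
#     --do_train --do_eval --predict_with_generate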
if __name__ == "__main__":
main() | 710 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=1024 , snake_case_ : Optional[int]=1024 , snake_case_ : Optional[Any]=False , **snake_case_ : Dict ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = SeqaSeqDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ , type_path='''train''' , **snake_case_ )
__magic_name__ = tok.pad_token_id
def get_lens(snake_case_ : Union[str, Any] ):
__magic_name__ = tqdm(
DataLoader(snake_case_ , batch_size=512 , num_workers=8 , shuffle=snake_case_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__magic_name__ = []
for batch in dl:
__magic_name__ = batch['''input_ids'''].ne(snake_case_ ).sum(1 ).tolist()
__magic_name__ = batch['''labels'''].ne(snake_case_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(snake_case_ , snake_case_ ):
max_lens.append(max(snake_case_ , snake_case_ ) )
else:
max_lens.extend(snake_case_ )
return max_lens
__magic_name__ = get_lens(snake_case_ )
__magic_name__ = SeqaSeqDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ , type_path='''val''' , **snake_case_ )
__magic_name__ = get_lens(snake_case_ )
pickle_save(snake_case_ , train_ds.len_file )
pickle_save(snake_case_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 711 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : str ):
__magic_name__ = 1.5
__magic_name__ = int(factor * num_class_images )
__magic_name__ = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=snake_case_ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
__magic_name__ = client.query(text=snake_case_ )
if len(snake_case_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__magic_name__ = int(factor * num_images )
__magic_name__ = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 , )
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = tqdm(desc='''downloading real regularization images''' , total=snake_case_ )
with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
f'{class_data_dir}/images.txt' , '''w''' ) as fa:
while total < num_class_images:
__magic_name__ = class_images[count]
count += 1
try:
__magic_name__ = requests.get(images['''url'''] )
if img.status_code == 200:
__magic_name__ = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser('''''' , add_help=snake_case_ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case_ , type=snake_case_ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case_ , type=snake_case_ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case_ )
return parser.parse_args()
if __name__ == "__main__":
a_ : Union[str, Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 712 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
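# Illustrative use of Config's dotted-attribute handling (keys are assumed):
#   cfg = Config({"model": {"roi_heads": {"nms_thresh": 0.5}}})
#   cfg.model.roi_heads.nms_thresh       -> 0.5
#   setattr(cfg, "model.roi_heads.nms_thresh", 0.7)  # dotted keys update nested configs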
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
    __magic_name__ = sha256(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
        __magic_name__ = sha256(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
        __magic_name__ = cv2.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
    __magic_name__ = cv2.cvtColor(snake_case_ , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a_ : List[str] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple ):
return max(metric_fn(snake_case_ , snake_case_ ) for gt in ground_truths )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : str ):
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = []
if args.gold_data_mode == "qa":
__magic_name__ = pd.read_csv(snake_case_ , sep='''\t''' , header=snake_case_ )
for answer_list in data[1]:
__magic_name__ = ast.literal_eval(snake_case_ )
answers.append(snake_case_ )
else:
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = [[reference] for reference in references]
__magic_name__ = __magic_name__ = __magic_name__ = 0
for prediction, ground_truths in zip(snake_case_ , snake_case_ ):
total += 1
em += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
fa += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = 100.0 * em / total
__magic_name__ = 100.0 * fa / total
logger.info(f'F1: {fa:.2f}' )
logger.info(f'EM: {em:.2f}' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : Optional[int] ):
__magic_name__ = args.k
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = __magic_name__ = 0
for hypo, reference in zip(snake_case_ , snake_case_ ):
__magic_name__ = set(hypo.split('''\t''' )[:k] )
__magic_name__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__magic_name__ = 100.0 * em / total
logger.info(f'Precision@{k}: {em: .2f}' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[str] ):
def strip_title(snake_case_ : List[str] ):
if title.startswith('''"''' ):
__magic_name__ = title[1:]
if title.endswith('''"''' ):
__magic_name__ = title[:-1]
return title
__magic_name__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors='''pt''' , padding=snake_case_ , truncation=snake_case_ , )['''input_ids'''].to(args.device )
__magic_name__ = rag_model.rag.question_encoder(snake_case_ )
__magic_name__ = question_enc_outputs[0]
__magic_name__ = rag_model.retriever(
snake_case_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
__magic_name__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__magic_name__ = []
for docs in all_docs:
__magic_name__ = [strip_title(snake_case_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(snake_case_ ) )
return provenance_strings
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
with torch.no_grad():
__magic_name__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors='''pt''' , padding=snake_case_ , truncation=snake_case_ )
__magic_name__ = inputs_dict.input_ids.to(args.device )
__magic_name__ = inputs_dict.attention_mask.to(args.device )
__magic_name__ = rag_model.generate( # rag_model overwrites generate
snake_case_ , attention_mask=snake_case_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__magic_name__ = rag_model.retriever.generator_tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
if args.print_predictions:
for q, a in zip(snake_case_ , snake_case_ ):
logger.info('''Q: {} - A: {}'''.format(snake_case_ , snake_case_ ) )
return answers
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=snake_case_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=snake_case_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=snake_case_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=snake_case_ , type=snake_case_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=snake_case_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=snake_case_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=snake_case_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=snake_case_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=snake_case_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=snake_case_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=snake_case_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=snake_case_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=snake_case_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = {}
if args.model_type is None:
__magic_name__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
__magic_name__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
__magic_name__ = args.n_docs
if args.index_name is not None:
__magic_name__ = args.index_name
if args.index_path is not None:
__magic_name__ = args.index_path
else:
__magic_name__ = BartForConditionalGeneration
__magic_name__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , snake_case_ )
__magic_name__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
__magic_name__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(snake_case_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
__magic_name__ = RagRetriever.from_pretrained(snake_case_ , **snake_case_ )
__magic_name__ = model_class.from_pretrained(snake_case_ , retriever=snake_case_ , **snake_case_ )
model.retriever.init_retrieval()
else:
__magic_name__ = model_class.from_pretrained(snake_case_ , **snake_case_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
__magic_name__ = []
for line in tqdm(snake_case_ ):
questions.append(line.strip() )
if len(snake_case_ ) == args.eval_batch_size:
__magic_name__ = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write('''\n'''.join(snake_case_ ) + '''\n''' )
preds_file.flush()
__magic_name__ = []
if len(snake_case_ ) > 0:
__magic_name__ = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write('''\n'''.join(snake_case_ ) )
preds_file.flush()
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ : Tuple = get_args()
main(args) | 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
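    # Download and tokenize GLUE MRPC, then wrap the train/validation splits in DataLoaders.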
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
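    # Evaluation pass: gather predictions/labels across processes and trim the duplicate
    # samples that distributed samplers pad into the final batch before scoring.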
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
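    # Full training loop: build model/optimizer/scheduler (falling back to DeepSpeed dummy
    # variants when the DeepSpeed config supplies them), optionally resume from a checkpoint
    # and sanity-check the restored state, then save a checkpoint and metrics after each epoch.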
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resumption.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
    main()
import heapq as hq
import math
from collections.abc import Iterator
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = str(id_ )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = []
__magic_name__ = {} # {vertex:distance}
def __lt__( self , A ) -> Optional[Any]:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> int:
'''simple docstring'''
return self.id
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
self.neighbors.append(A )
def __A ( self , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = weight
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
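    # Register an undirected, weighted edge between the vertices with ids a and b.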
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , snake_case_ )
graph[b - 1].add_edge(graph[a - 1] , snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ):
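    # Prim's algorithm with a linear scan over the remaining vertices (O(V^2)).
    # Returns the MST as a list of (vertex_id, parent_id) pairs.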
__magic_name__ = []
for u in graph:
__magic_name__ = math.inf
__magic_name__ = None
__magic_name__ = 0
__magic_name__ = graph[:]
while q:
__magic_name__ = min(snake_case_ )
q.remove(snake_case_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__magic_name__ = u
__magic_name__ = u.edges[v.id]
for i in range(1 , len(snake_case_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ):
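    # Prim's algorithm backed by a binary heap; the heap is re-heapified after each round
    # of key updates. Yields the MST edges as (vertex_id, parent_id) pairs.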
for u in graph:
__magic_name__ = math.inf
__magic_name__ = None
__magic_name__ = 0
__magic_name__ = list(snake_case_ )
hq.heapify(snake_case_ )
while h:
__magic_name__ = hq.heappop(snake_case_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__magic_name__ = u
__magic_name__ = u.edges[v.id]
hq.heapify(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _SCREAMING_SNAKE_CASE ( ):
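    # Placeholder kept for the doctest runner invoked below.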
pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
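    # Reverse every word longer than four characters, leaving shorter words untouched.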
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
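    # Check whether `number` is a perfect square.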
__magic_name__ = int(number**0.5 )
return number == sq * sq
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
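    # Add three fractions given as numerator/denominator pairs and reduce the result.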
__magic_name__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__magic_name__ = x_den * y_den * z_den
__magic_name__ = gcd(snake_case_ , snake_case_ )
top //= hcf
bottom //= hcf
return top, bottom
def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 35 ):
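    # Enumerate reduced fractions x, y, z with denominators up to `order` that satisfy
    # x^n + y^n = z^n for n in {1, 2, -1, -2}, sum the distinct values of x + y + z,
    # and return numerator + denominator of the total.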
__magic_name__ = set()
__magic_name__ = 42
__magic_name__ = Fraction(0 )
__magic_name__ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__magic_name__ = x_num * y_den + x_den * y_num
__magic_name__ = x_den * y_den
__magic_name__ = gcd(snake_case_ , snake_case_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ = add_three(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
unique_s.add(snake_case_ )
# n=2
__magic_name__ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__magic_name__ = x_den * x_den * y_den * y_den
if is_sq(snake_case_ ) and is_sq(snake_case_ ):
__magic_name__ = int(sqrt(snake_case_ ) )
__magic_name__ = int(sqrt(snake_case_ ) )
__magic_name__ = gcd(snake_case_ , snake_case_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ = add_three(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
unique_s.add(snake_case_ )
# n=-1
__magic_name__ = x_num * y_num
__magic_name__ = x_den * y_num + x_num * y_den
__magic_name__ = gcd(snake_case_ , snake_case_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ = add_three(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
unique_s.add(snake_case_ )
                    # n=-2
__magic_name__ = x_num * x_num * y_num * y_num
__magic_name__ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(snake_case_ ) and is_sq(snake_case_ ):
__magic_name__ = int(sqrt(snake_case_ ) )
__magic_name__ = int(sqrt(snake_case_ ) )
__magic_name__ = gcd(snake_case_ , snake_case_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ = add_three(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
unique_s.add(snake_case_ )
for num, den in unique_s:
total += Fraction(snake_case_ , snake_case_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
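        # Thin wrapper around mauve-text's compute_mauve; every tuning knob is passed through.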
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
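    # Build an ASTConfig whose architecture settings and id2label mapping are inferred
    # from the checkpoint name (e.g. "12-12", "speech-commands").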
__magic_name__ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__magic_name__ = 128
elif "12-12" in model_name:
__magic_name__ = 12
__magic_name__ = 12
elif "14-14" in model_name:
__magic_name__ = 14
__magic_name__ = 14
elif "16-16" in model_name:
__magic_name__ = 16
__magic_name__ = 16
else:
raise ValueError('''Model not supported''' )
__magic_name__ = '''huggingface/label-files'''
if "speech-commands" in model_name:
__magic_name__ = 35
__magic_name__ = '''speech-commands-v2-id2label.json'''
else:
__magic_name__ = 527
__magic_name__ = '''audioset-id2label.json'''
__magic_name__ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
    __magic_name__ = {int(k): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
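    # Translate one parameter name from the original AST checkpoint layout to the
    # Hugging Face Transformers layout.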
if "module.v" in name:
__magic_name__ = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
__magic_name__ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
__magic_name__ = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
__magic_name__ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
__magic_name__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
__magic_name__ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__magic_name__ = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
__magic_name__ = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
__magic_name__ = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] ):
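    # Rename every key and split the fused qkv projection into separate
    # query/key/value weights and biases.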
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[3] )
__magic_name__ = config.hidden_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[dim : dim * 2, :]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[:dim]
__magic_name__ = val[dim : dim * 2]
__magic_name__ = val[-dim:]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
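    # Drop the original classification/distillation head weights, which are not reused.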
__magic_name__ = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=False ):
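    # End-to-end conversion: download the original weights, remap them into an
    # ASTForAudioClassification, verify the logits on a sample input, then save and/or push.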
__magic_name__ = get_audio_spectrogram_transformer_config(snake_case_ )
__magic_name__ = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
__magic_name__ = model_name_to_url[model_name]
__magic_name__ = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' )
# remove some keys
remove_keys(snake_case_ )
# rename some keys
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
# load 🤗 model
__magic_name__ = ASTForAudioClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__magic_name__ = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
__magic_name__ = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
__magic_name__ = 1024 if '''speech-commands''' not in model_name else 128
__magic_name__ = ASTFeatureExtractor(mean=snake_case_ , std=snake_case_ , max_length=snake_case_ )
if "speech-commands" in model_name:
__magic_name__ = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
__magic_name__ = dataset[0]['''audio''']['''array''']
else:
__magic_name__ = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
__magic_name__ , __magic_name__ = torchaudio.load(snake_case_ )
__magic_name__ = waveform.squeeze().numpy()
__magic_name__ = feature_extractor(snake_case_ , sampling_rate=1_6000 , return_tensors='''pt''' )
# forward pass
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__magic_name__ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__magic_name__ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__magic_name__ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__magic_name__ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__magic_name__ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__magic_name__ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__magic_name__ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__magic_name__ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(snake_case_ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : List[Any] = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
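    # Euclidean distance computed with NumPy.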
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
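    # Pure-Python Euclidean distance, used as the baseline in the benchmark below.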
    return sum((va - vb) ** 2 for va, vb in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( ):
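    # Small in-memory dataset with nested features, shared across the whole test session.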
__magic_name__ = 10
__magic_name__ = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
__magic_name__ = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(snake_case_ ) ),
} , features=snake_case_ , )
return dataset
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=snake_case_ )
return filename
# FILE_CONTENT + files
a_ : Optional[Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
__magic_name__ = FILE_CONTENT
with open(snake_case_ , '''w''' ) as f:
f.write(snake_case_ )
return filename
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
    import bz2
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
__magic_name__ = bytes(snake_case_ , '''utf-8''' )
    with bz2.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
import gzip
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
__magic_name__ = bytes(snake_case_ , '''utf-8''' )
with gzip.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
__magic_name__ = bytes(snake_case_ , '''utf-8''' )
        with lz4.frame.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any] ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(snake_case_ , '''w''' ) as archive:
archive.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str ):
import tarfile
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
import lzma
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
__magic_name__ = bytes(snake_case_ , '''utf-8''' )
with lzma.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
import zipfile
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
__magic_name__ = bytes(snake_case_ , '''utf-8''' )
with zstd.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
__magic_name__ = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(snake_case_ , '''w''' ) as f:
f.write(snake_case_ )
return filename
a_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
a_ : Tuple = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
a_ : List[str] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
a_ : Optional[int] = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
a_ : str = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
__magic_name__ = datasets.Dataset.from_dict(snake_case_ )
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(snake_case_ ) ) as con:
__magic_name__ = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(snake_case_ , '''w''' , newline='''''' ) as f:
__magic_name__ = csv.DictWriter(snake_case_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(snake_case_ , '''w''' , newline='''''' ) as f:
__magic_name__ = csv.DictWriter(snake_case_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str ):
    import bz2
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : List[str] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(snake_case_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : int ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
__magic_name__ = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(snake_case_ , '''wb''' ) as f:
__magic_name__ = pq.ParquetWriter(snake_case_ , schema=snake_case_ )
__magic_name__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(snake_case_ ) )] for k in DATA[0]} , schema=snake_case_ )
writer.write_table(snake_case_ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__magic_name__ = {'''data''': DATA}
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__magic_name__ = {'''data''': DATA_DICT_OF_LISTS}
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[Any] ):
import gzip
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(snake_case_ , '''rb''' ) as orig_file:
with gzip.open(snake_case_ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Dict ):
import gzip
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(snake_case_ , '''rb''' ) as orig_file:
with gzip.open(snake_case_ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Dict ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''nested''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Optional[int] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.join('''nested''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
__magic_name__ = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : Any ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(snake_case_ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
__magic_name__ = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
__magic_name__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Dict ):
__magic_name__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
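    # Map a single OpenAI Jukebox parameter name onto the Transformers naming scheme.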
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
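    # Rewrite an original Jukebox state dict: the regexes below match the encoder, decoder
    # and conditioner conv/resnet blocks and rename them to the HF layout, reporting any
    # key or shape that does not line up with the target model.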
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
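    # Download the original checkpoints if needed, convert the VQ-VAE and the priors,
    # load them into a JukeboxModel, and save the model together with a key mapping.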
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 0 |
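# Illustrative sketch (not part of the script above): how one regex rule maps a raw
# Jukebox checkpoint key onto the new naming scheme. The pattern below is written in
# the spirit of `re_prior_cond_conv_out` (defined earlier in this file) and the key
# is a hypothetical example; the block index uses the same arithmetic as above.
import re

_re_demo = re.compile(r"conditioner_blocks\.(\d*)\.cond\.model\.(\d*)\.(\d)\.(bias|weight)")
_raw_key = "conditioner_blocks.0.cond.model.1.2.weight"  # hypothetical checkpoint key
_match = _re_demo.fullmatch(_raw_key)
if _match is not None:
    _groups = _match.groups()
    _block_index = int(_groups[1]) * 2 + int(_groups[2]) - 2
    print(f"conditioner_blocks.upsampler.upsample_block.{_block_index}.{_groups[-1]}")
    # -> conditioner_blocks.upsampler.upsample_block.2.weight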
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """The column name of the images in the files."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """A folder containing the training data."""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """A folder containing the validation data."""} )
_a = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__( self ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''validation'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_a = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Name or path of preprocessor config."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_a = field(
default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
__magic_name__ = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__magic_name__ = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__magic_name__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__magic_name__ = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
__magic_name__ = ds['''train'''].train_test_split(data_args.train_val_split )
__magic_name__ = split['''train''']
__magic_name__ = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__magic_name__ = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case_ )
elif model_args.model_name_or_path:
__magic_name__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
__magic_name__ = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__magic_name__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ )
elif model_args.model_name_or_path:
__magic_name__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
__magic_name__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__magic_name__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__magic_name__ = ViTMAEForPreTraining(snake_case_ )
if training_args.do_train:
__magic_name__ = ds['''train'''].column_names
else:
__magic_name__ = ds['''validation'''].column_names
if data_args.image_column_name is not None:
__magic_name__ = data_args.image_column_name
elif "image" in column_names:
__magic_name__ = '''image'''
elif "img" in column_names:
__magic_name__ = '''img'''
else:
__magic_name__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__magic_name__ = image_processor.size['''shortest_edge''']
else:
__magic_name__ = (image_processor.size['''height'''], image_processor.size['''width'''])
__magic_name__ = Compose(
[
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__magic_name__ = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__magic_name__ = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case_ )
# Compute absolute learning rate
__magic_name__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__magic_name__ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
__magic_name__ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
__magic_name__ = None
if training_args.resume_from_checkpoint is not None:
__magic_name__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__magic_name__ = last_checkpoint
__magic_name__ = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__magic_name__ = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case_ )
trainer.save_metrics('''eval''' , snake_case_ )
# Write model card and (optionally) push to hub
__magic_name__ = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 718 |
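# A minimal numeric sketch of the learning-rate scaling rule implemented above
# (absolute_lr = base_lr * total_batch_size / 256); the batch-size factors below
# are hypothetical values chosen only for illustration.
base_lr = 1e-3                      # the default base_learning_rate above
total_train_batch_size = 8 * 4 * 2  # per-device batch 8, 4 accumulation steps, 2 processes
print(base_lr * total_train_batch_size / 256)  # -> 0.00025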
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 0 |
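# Usage sketch: instantiating the config with its defaults and reading the two
# properties that alias `encoder_attention_heads` / `d_model` through `attribute_map`.
# This assumes the public `transformers.TableTransformerConfig` matches the class above.
from transformers import TableTransformerConfig

_cfg = TableTransformerConfig()
print(_cfg.num_attention_heads)  # -> 8, aliased to encoder_attention_heads
print(_cfg.hidden_size)          # -> 256, aliased to d_model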
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
_a = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_a = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , A , A , A ) -> str:
'''simple docstring'''
__magic_name__ = AudioClassificationPipeline(model=A , feature_extractor=A )
# test with a raw waveform
__magic_name__ = np.zeros((3_40_00,) )
__magic_name__ = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __A ( self , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = examples
__magic_name__ = audio_classifier(A )
# by default a model is initialized with num_labels=2
self.assertEqual(
A , [
{'''score''': ANY(A ), '''label''': ANY(A )},
{'''score''': ANY(A ), '''label''': ANY(A )},
] , )
__magic_name__ = audio_classifier(A , top_k=1 )
self.assertEqual(
A , [
{'''score''': ANY(A ), '''label''': ANY(A )},
] , )
self.run_torchaudio(A )
@require_torchaudio
def __A ( self , A ) -> Any:
'''simple docstring'''
import datasets
        # test with an audio array loaded from a dataset
__magic_name__ = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
__magic_name__ = dataset[0]['''audio''']['''array''']
__magic_name__ = audio_classifier(A )
self.assertEqual(
A , [
{'''score''': ANY(A ), '''label''': ANY(A )},
{'''score''': ANY(A ), '''label''': ANY(A )},
] , )
@require_torch
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = '''anton-l/wav2vec2-random-tiny-classifier'''
__magic_name__ = pipeline('''audio-classification''' , model=A )
__magic_name__ = np.ones((80_00,) )
__magic_name__ = audio_classifier(A , top_k=4 )
__magic_name__ = [
{'''score''': 0.08_42, '''label''': '''no'''},
{'''score''': 0.08_38, '''label''': '''up'''},
{'''score''': 0.08_37, '''label''': '''go'''},
{'''score''': 0.08_34, '''label''': '''right'''},
]
__magic_name__ = [
{'''score''': 0.08_45, '''label''': '''stop'''},
{'''score''': 0.08_44, '''label''': '''on'''},
{'''score''': 0.08_41, '''label''': '''right'''},
{'''score''': 0.08_34, '''label''': '''left'''},
]
self.assertIn(nested_simplify(A , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__magic_name__ = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
__magic_name__ = audio_classifier(A , top_k=4 )
self.assertIn(nested_simplify(A , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ) -> int:
'''simple docstring'''
import datasets
__magic_name__ = '''superb/wav2vec2-base-superb-ks'''
__magic_name__ = pipeline('''audio-classification''' , model=A )
__magic_name__ = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
__magic_name__ = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
__magic_name__ = audio_classifier(A , top_k=4 )
self.assertEqual(
nested_simplify(A , decimals=3 ) , [
{'''score''': 0.9_81, '''label''': '''go'''},
{'''score''': 0.0_07, '''label''': '''up'''},
{'''score''': 0.0_06, '''label''': '''_unknown_'''},
{'''score''': 0.0_01, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __A ( self ) -> Any:
'''simple docstring'''
pass | 719 |
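# Usage sketch distilled from the tests above; the checkpoint is the tiny random
# classifier the tests already reference, so the returned scores/labels are
# illustrative only (the model weights are random).
import numpy as np
from transformers import pipeline

_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
print(_classifier(np.ones((8000,), dtype=np.float32), top_k=2))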
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : Optional[int] , pytorch_dump_path : Union[str, Any] ):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 0 |
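# Programmatic equivalent of the CLI above; all three paths are hypothetical
# placeholders for a real TF checkpoint prefix, its config JSON, and the output file:
#
#   convert_tf_checkpoint_to_pytorch(
#       "/tmp/lxmert/model.ckpt",         # hypothetical TF checkpoint
#       "/tmp/lxmert/config.json",        # hypothetical config file
#       "/tmp/lxmert/pytorch_model.bin",  # hypothetical output path
#   )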
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = None , A = True , A = 1 / 2_55 , A = True , A = None , A = None , A = True , **A , ) -> None:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = size if size is not None else {'''shortest_edge''': 2_24}
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__magic_name__ = get_size_dict(A , default_to_square=A , param_name='''crop_size''' )
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = do_normalize
__magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD
__magic_name__ = do_convert_rgb
def __A ( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray:
'''simple docstring'''
__magic_name__ = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__magic_name__ = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def __A ( self , A , A , A = None , **A , ) -> np.ndarray:
'''simple docstring'''
__magic_name__ = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A )
def __A ( self , A , A , A = None , **A , ) -> List[Any]:
'''simple docstring'''
return rescale(A , scale=A , data_format=A , **A )
def __A ( self , A , A , A , A = None , **A , ) -> np.ndarray:
'''simple docstring'''
return normalize(A , mean=A , std=A , data_format=A , **A )
def __A ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
'''simple docstring'''
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(A , param_name='''size''' , default_to_square=A )
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ = crop_size if crop_size is not None else self.crop_size
__magic_name__ = get_size_dict(A , param_name='''crop_size''' , default_to_square=A )
__magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ = image_mean if image_mean is not None else self.image_mean
__magic_name__ = image_std if image_std is not None else self.image_std
__magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__magic_name__ = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__magic_name__ = [convert_to_rgb(A ) for image in images]
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(A ) for image in images]
if do_resize:
__magic_name__ = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
__magic_name__ = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
__magic_name__ = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
__magic_name__ = [self.normalize(image=A , mean=A , std=A ) for image in images]
__magic_name__ = [to_channel_dimension_format(A , A ) for image in images]
__magic_name__ = {'''pixel_values''': images}
return BatchFeature(data=A , tensor_type=A ) | 720 |
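# Minimal usage sketch: the processor above mirrors transformers' CLIPImageProcessor
# (same defaults: shortest edge 224, 224x224 center crop, 1/255 rescale, CLIP
# mean/std), so assuming that public class is equivalent:
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

_processor = CLIPImageProcessor()
_image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
_batch = _processor(images=_image, return_tensors="np")
print(_batch["pixel_values"].shape)  # -> (1, 3, 224, 224)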
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
a_ : Tuple = random.Random()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple=1.0 , snake_case_ : List[str]=None , snake_case_ : Tuple=None ):
if rng is None:
__magic_name__ = global_rng
__magic_name__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_00 , A=20_00 , A=10 , A=1_60 , A=8 , A=0.0 , A=40_00 , A=False , A=True , ) -> Dict:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = min_seq_length
__magic_name__ = max_seq_length
__magic_name__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__magic_name__ = padding_value
__magic_name__ = sampling_rate
__magic_name__ = return_attention_mask
__magic_name__ = do_normalize
__magic_name__ = feature_size
__magic_name__ = chunk_length
__magic_name__ = hop_length
def __A ( self ) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A=False , A=False ) -> int:
'''simple docstring'''
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
__magic_name__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__magic_name__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__magic_name__ = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = WhisperFeatureExtractor if is_speech_available() else None
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = WhisperFeatureExtractionTester(self )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__magic_name__ = self.feature_extraction_class.from_pretrained(A )
__magic_name__ = feat_extract_first.to_dict()
__magic_name__ = feat_extract_second.to_dict()
__magic_name__ = feat_extract_first.mel_filters
__magic_name__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = os.path.join(A , '''feat_extract.json''' )
feat_extract_first.to_json_file(A )
__magic_name__ = self.feature_extraction_class.from_json_file(A )
__magic_name__ = feat_extract_first.to_dict()
__magic_name__ = feat_extract_second.to_dict()
__magic_name__ = feat_extract_first.mel_filters
__magic_name__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__magic_name__ = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
__magic_name__ = feature_extractor(A , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__magic_name__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__magic_name__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test batched
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__magic_name__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__magic_name__ = np.asarray(A )
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test truncation required
__magic_name__ = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
__magic_name__ = [np.asarray(A ) for speech_input in speech_inputs]
__magic_name__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
__magic_name__ = [np.asarray(A ) for speech_input in speech_inputs_truncated]
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
__magic_name__ = feature_extractor(A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_00 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __A ( self , A ) -> Any:
'''simple docstring'''
__magic_name__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__magic_name__ = ds.sort('''id''' ).select(range(A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
__magic_name__ = self._load_datasamples(1 )
__magic_name__ = WhisperFeatureExtractor()
__magic_name__ = feature_extractor(A , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A , atol=1E-4 ) )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ = self._load_datasamples(1 )[0]
__magic_name__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
__magic_name__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A )[0]
self.assertTrue(np.all(np.mean(A ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A ) - 1 ) < 1E-3 ) ) | 721 |
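# The normalization exercised by the last test is plain per-example standardization;
# a minimal reference sketch (the 1e-7 epsilon is an assumption for illustration):
import numpy as np

def _zero_mean_unit_var(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)

_audio = np.random.rand(16_000).astype(np.float32) * 65_535
_norm = _zero_mean_unit_var(_audio)
print(abs(_norm.mean()) < 1e-3, abs(_norm.var() - 1) < 1e-3)  # -> True True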
def print_max_activities( start : list[int] , finish : list[int] ):
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has a start time greater than
        # or equal to the finish time of the previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 678 | 0 |
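# The greedy above assumes the activities are already sorted by finish time; a small
# illustrative wrapper (not part of the original) that sorts first and returns the
# selected (start, finish) pairs instead of printing indices:
def select_activities(start, finish):
    pairs = sorted(zip(start, finish), key=lambda p: p[1])
    selected, last_finish = [], float("-inf")
    for s, f in pairs:
        if s >= last_finish:
            selected.append((s, f))
            last_finish = f
    return selected

print(select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))
# -> [(1, 2), (3, 4), (5, 7), (8, 9)]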
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector( end_point1 : Pointad , end_point2 : Pointad ):
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_ad_vectors_cross( ab : Vectorad , ac : Vectorad ):
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector( vector : Vectorad , accuracy : int ):
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ):
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
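# Quick illustrative check of are_collinear above: points on the line x = y = z
# are collinear; an off-axis triple is not.
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # -> True
print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # -> False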
| 700 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
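# The feature scaling above is two chained affine maps; a minimal numeric sketch
# using the constants set in __init__ (min_value = log(1e-5), max_value = 4.0):
import math

_min_value, _max_value = math.log(1e-5), 4.0

def _scale_features(x, min_out=-1.0, max_out=1.0):
    zero_one = (x - _min_value) / (_max_value - _min_value)
    return zero_one * (max_out - min_out) + min_out

print(_scale_features(_min_value), _scale_features(_max_value))  # -> -1.0 1.0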
def counting_sort( collection ):
    # if the collection is empty, return empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection contains
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted)) | 701 |
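# Counting sort runs in O(n + k) time and space, where k = coll_max + 1 - coll_min;
# it also handles negative values via the coll_min offset, e.g.:
print(counting_sort([4, -1, 0, 4, 2]))  # -> [-1, 0, 2, 4, 4]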
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = AltDiffusionPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
def __A ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__magic_name__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
__magic_name__ = CLIPTextModel(A )
__magic_name__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__magic_name__ = 77
__magic_name__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __A ( self , A , A=0 ) -> Any:
'''simple docstring'''
if str(A ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(A )
else:
__magic_name__ = torch.Generator(device=A ).manual_seed(A )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self ) -> Tuple:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __A ( self ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
torch.manual_seed(0 )
__magic_name__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ = RobertaSeriesModelWithTransformation(A )
__magic_name__ = text_encoder
__magic_name__ = AltDiffusionPipeline(**A )
__magic_name__ = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
__magic_name__ = self.get_dummy_inputs(A )
__magic_name__ = '''A photo of an astronaut'''
__magic_name__ = alt_pipe(**A )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = PNDMScheduler(skip_prk_steps=A )
torch.manual_seed(0 )
__magic_name__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ = RobertaSeriesModelWithTransformation(A )
__magic_name__ = text_encoder
__magic_name__ = AltDiffusionPipeline(**A )
__magic_name__ = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
__magic_name__ = self.get_dummy_inputs(A )
__magic_name__ = alt_pipe(**A )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=A )
__magic_name__ = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = alt_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
__magic_name__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=A , safety_checker=A )
__magic_name__ = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = alt_pipe([prompt] , generator=A , num_inference_steps=2 , output_type='''numpy''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 702 |
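# A minimal standalone sketch of the slow path exercised above (checkpoint name
# taken from the tests; availability of diffusers/torch is an assumption):
#
#   import torch
#   from diffusers import AltDiffusionPipeline
#
#   pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', safety_checker=None)
#   generator = torch.manual_seed(0)
#   image = pipe('A painting of a squirrel eating a burger', generator=generator,
#                guidance_scale=6.0, num_inference_steps=20, output_type='np').images[0]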
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported; only base and large variants are supported.')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')

    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'

    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv tensor; the target keys follow the module
            # path used for `dim` above
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(f'microsoft/{model_name}')
        image_processor.push_to_hub(f'microsoft/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
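    # Hedged CLI sketch (script file name is an assumption; the defaults above
    # already point at a local SimMIM checkpoint):
    #
    #   python convert_swin_simmim_to_pytorch.py \
    #       --model_name swin-base-simmim-window6-192 \
    #       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
    #       --pytorch_dump_folder_path ./swin-simmim-hf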
UNIVERSAL_GAS_CONSTANT = 8.314_462  # unit: J mol^-1 K^-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure from the ideal gas law: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume from the ideal gas law: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod() | 703 |
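    # Illustrative check using the functions above: 1 mol of an ideal gas at
    # 273.15 K in 0.0224 m^3 sits at roughly atmospheric pressure.
    print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~101388 Pa (about 1 atm)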
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
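    # Illustrative lookup (the result depends on the contents of words.txt);
    # for a common word the anagram group might look like
    # ['east', 'eats', 'etas', 'teas'].
    print(anagram('east'))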
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs from a sequence or a pair of sequences by adding special tokens."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create a mask of zeros; BARThez does not make use of token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
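# Hedged usage sketch for the fast tokenizer above (checkpoint availability is
# an assumption):
#
#   tok = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
#   tok.build_inputs_with_special_tokens([10, 11], [12])
#   # -> [cls, 10, 11, sep, sep, 12, sep] as token ids, matching the pair layout above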
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over the interval [0, 1], defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the values of all Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) coordinates of the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plot the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
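    # Quick numeric sanity check (no plotting): the midpoint of the degree-1
    # curve from (1, 2) to (3, 5) is (2.0, 3.5).
    print(BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5))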
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an Encodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __A ( self , A=None , A=None , A=True ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A )
def __call__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*A , **A )
__magic_name__ = kwargs.pop('''audio''' , A )
__magic_name__ = kwargs.pop('''sampling_rate''' , A )
__magic_name__ = kwargs.pop('''text''' , A )
if len(A ) > 0:
__magic_name__ = args[0]
__magic_name__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__magic_name__ = self.tokenizer(A , **A )
if audio is not None:
__magic_name__ = self.feature_extractor(A , *A , sampling_rate=A , **A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__magic_name__ = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__magic_name__ = audio_inputs['''padding_mask''']
return inputs
def __A ( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''audio''' , A )
__magic_name__ = kwargs.pop('''padding_mask''' , A )
if len(A ) > 0:
__magic_name__ = args[0]
__magic_name__ = args[1:]
if audio_values is not None:
return self._decode_audio(A , padding_mask=A )
else:
return self.tokenizer.batch_decode(*A , **A )
def __A ( self , *A , **A ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*A , **A )
def __A ( self , A , A = None ) -> List[np.ndarray]:
'''simple docstring'''
__magic_name__ = to_numpy(A )
__magic_name__ , __magic_name__ , __magic_name__ = audio_values.shape
if padding_mask is None:
return list(A )
__magic_name__ = to_numpy(A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__magic_name__ = seq_len - padding_mask.shape[-1]
__magic_name__ = 1 - self.feature_extractor.padding_value
__magic_name__ = np.pad(A , ((0, 0), (0, difference)) , '''constant''' , constant_values=A )
__magic_name__ = audio_values.tolist()
for i in range(A ):
__magic_name__ = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__magic_name__ = sliced_audio.reshape(A , -1 )
return audio_values | 705 |
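# Hedged usage sketch for the processor above (checkpoint name and exact kwargs
# are assumptions):
#
#   processor = MusicgenProcessor.from_pretrained('facebook/musicgen-small')
#   inputs = processor(text=['80s pop track with bassy drums'], padding=True,
#                      return_tensors='pt')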
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$')
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
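    # A couple more hedged examples against the pattern above:
    print(is_sri_lankan_phone_number('+94773283048'))  # True: valid mobile prefix
    print(is_sri_lankan_phone_number('0112345678'))    # False: landline-style number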
def text_justification(word: str, max_width: int) -> list:
    """Greedily wrap the given sentence into fully justified lines of max_width."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
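    # Illustrative call using text_justification as defined above:
    print(text_justification('This is an example of text justification.', 16))
    # -> ['This    is    an', 'example  of text', 'justification.  ']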
| 706 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
        # We don't assert on the exact list of keys to allow for the smooth growth of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |
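# Quick hedged illustration of find_backend's naming scheme, consistent with
# the assertions above:
#
#   find_backend('    if not (is_torch_available() and is_scipy_available()):')
#   # -> 'torch_and_scipy'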
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Wraps a text tokenizer and optional Bark voice presets into a single processor."""

    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __A ( cls , A , A="speaker_embeddings_path.json" , **A ) -> List[str]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__magic_name__ = get_file_from_repo(
A , A , subfolder=kwargs.pop('''subfolder''' , A ) , cache_dir=kwargs.pop('''cache_dir''' , A ) , force_download=kwargs.pop('''force_download''' , A ) , proxies=kwargs.pop('''proxies''' , A ) , resume_download=kwargs.pop('''resume_download''' , A ) , local_files_only=kwargs.pop('''local_files_only''' , A ) , use_auth_token=kwargs.pop('''use_auth_token''' , A ) , revision=kwargs.pop('''revision''' , A ) , )
if speaker_embeddings_path is None:
logger.warning(
                F'`{os.path.join(A , A )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__magic_name__ = None
else:
with open(A ) as speaker_embeddings_json:
__magic_name__ = json.load(A )
else:
__magic_name__ = None
__magic_name__ = AutoTokenizer.from_pretrained(A , **A )
return cls(tokenizer=A , speaker_embeddings=A )
def __A ( self , A , A="speaker_embeddings_path.json" , A="speaker_embeddings" , A = False , **A , ) -> Dict:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A , A , '''v2''' ) , exist_ok=A )
__magic_name__ = {}
__magic_name__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__magic_name__ = self._load_voice_preset(A )
__magic_name__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , A , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=A , )
__magic_name__ = os.path.join(A , F'{prompt_key}_{key}.npy' )
__magic_name__ = tmp_dict
with open(os.path.join(A , A ) , '''w''' ) as fp:
json.dump(A , A )
super().save_pretrained(A , A , **A )
def __A ( self , A = None , **A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.speaker_embeddings[voice_preset]
__magic_name__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__magic_name__ = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , A ) , cache_dir=kwargs.pop('''cache_dir''' , A ) , force_download=kwargs.pop('''force_download''' , A ) , proxies=kwargs.pop('''proxies''' , A ) , resume_download=kwargs.pop('''resume_download''' , A ) , local_files_only=kwargs.pop('''local_files_only''' , A ) , use_auth_token=kwargs.pop('''use_auth_token''' , A ) , revision=kwargs.pop('''revision''' , A ) , )
if path is None:
raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
__magic_name__ = np.load(A )
return voice_preset_dict
def __A ( self , A = None ) -> Any:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , A=None , A=None , A="pt" , A=2_56 , A=False , A=True , A=False , **A , ) -> int:
'''simple docstring'''
if voice_preset is not None and not isinstance(A , A ):
if (
isinstance(A , A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__magic_name__ = self._load_voice_preset(A )
else:
if isinstance(A , A ) and not voice_preset.endswith('''.npz''' ):
__magic_name__ = voice_preset + '''.npz'''
__magic_name__ = np.load(A )
if voice_preset is not None:
self._validate_voice_preset_dict(A , **A )
__magic_name__ = BatchFeature(data=A , tensor_type=A )
__magic_name__ = self.tokenizer(
A , return_tensors=A , padding='''max_length''' , max_length=A , return_attention_mask=A , return_token_type_ids=A , add_special_tokens=A , **A , )
if voice_preset is not None:
__magic_name__ = voice_preset
return encoded_text | 707 |
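# Hedged usage sketch for the processor above (checkpoint and preset names are
# assumptions):
#
#   processor = BarkProcessor.from_pretrained('suno/bark-small')
#   inputs = processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')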
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the distinct paths from (row, col) to the bottom-right free cell of grid."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 0 |
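    # Illustrative call using depth_first_search as defined above: a 2x2 grid
    # of free cells has exactly two paths from the top-left to the bottom-right.
    print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # 2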
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@staticmethod
def __A ( *A , **A ) -> List[Any]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = image_classifier(A , candidate_labels=['''a''', '''b''', '''c'''] )
        # The scores are so close that floating-point error makes the ordering unstable across
        # Python and torch versions.
self.assertIn(
nested_simplify(A ) , [
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
] , )
__magic_name__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
] , )
@require_tf
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = image_classifier(A , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
__magic_name__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
{'''score''': 0.3_33, '''label''': ANY(A )},
],
] , )
@slow
@require_torch
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = image_classifier(A , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
__magic_name__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = image_classifier(A , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
__magic_name__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , ) | 708 |
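# Minimal standalone sketch of the slow path above (assumes transformers with
# torch and PIL installed):
#
#   from transformers import pipeline
#   clf = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
#   clf('./tests/fixtures/tests_samples/COCO/000000039769.png',
#       candidate_labels=['cat', 'plane', 'remote'])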
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between the metric units defined above."""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 0 |
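    # Illustrative conversions using length_conversion as defined above:
    print(length_conversion(4, 'kilometer', 'meter'))  # 4000
    print(length_conversion(1, 'meter', 'kilometer'))  # 0.001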
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
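# With the lazy structure above in place, a hedged import sketch (assumes torch
# and sentencepiece are installed):
#
#   from transformers import XLMRobertaTokenizer, XLMRobertaModel
#   model = XLMRobertaModel.from_pretrained('xlm-roberta-base')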
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict: the fused qkv bias is q, a zero
        # block for k, and v, concatenated
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=3_2001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=3_2001).to_dict()
    else:
        raise ValueError('Model name not supported')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=3_0523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
__magic_name__ = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
__magic_name__ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__magic_name__ = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
__magic_name__ , __magic_name__ = get_blipa_config(snake_case_ )
__magic_name__ = InstructBlipForConditionalGeneration(snake_case_ ).eval()
__magic_name__ = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
__magic_name__ , __magic_name__ = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__magic_name__ = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
__magic_name__ = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
__magic_name__ , __magic_name__ , __magic_name__ = load_model_and_preprocess(
name=snake_case_ , model_type=snake_case_ , is_eval=snake_case_ , device=snake_case_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
__magic_name__ = original_model.state_dict()
__magic_name__ = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__magic_name__ = state_dict.pop(snake_case_ )
if key.startswith('''Qformer.bert''' ):
__magic_name__ = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__magic_name__ = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
__magic_name__ = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
__magic_name__ = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
__magic_name__ = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
__magic_name__ = key.replace('''t5''' , '''language''' )
__magic_name__ = val
# read in qv biases
read_in_q_v_bias(snake_case_ , snake_case_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(snake_case_ , strict=snake_case_ )
__magic_name__ = load_demo_image()
__magic_name__ = '''What is unusual about this image?'''
# create processor
__magic_name__ = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=snake_case_ , image_std=snake_case_ )
__magic_name__ = InstructBlipProcessor(
image_processor=snake_case_ , tokenizer=snake_case_ , qformer_tokenizer=snake_case_ , )
__magic_name__ = processor(images=snake_case_ , text=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# make sure processor creates exact same pixel values
__magic_name__ = vis_processors['''eval'''](snake_case_ ).unsqueeze(0 ).to(snake_case_ )
__magic_name__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , snake_case_ )
original_model.to(snake_case_ )
hf_model.to(snake_case_ )
with torch.no_grad():
if "vicuna" in model_name:
__magic_name__ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
__magic_name__ = hf_model(**snake_case_ ).logits
else:
__magic_name__ = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
__magic_name__ = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(snake_case_ )
__magic_name__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
__magic_name__ = hf_model(**snake_case_ , labels=snake_case_ ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__magic_name__ = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , snake_case_ , atol=snake_case_ )
print('''Looks ok!''' )
print('''Generating with original model...''' )
__magic_name__ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
__magic_name__ = hf_model.generate(
**snake_case_ , do_sample=snake_case_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__magic_name__ = 2
print('''Original generation:''' , snake_case_ )
__magic_name__ = processor.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
__magic_name__ = [text.strip() for text in output_text]
print('''HF generation:''' , snake_case_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if push_to_hub:
processor.push_to_hub(f'Salesforce/{model_name}' )
hf_model.push_to_hub(f'Salesforce/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
        help='Name of the InstructBLIP model to convert.',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 710 |
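    # Hedged CLI sketch (script file name is an assumption):
    #
    #   python convert_instructblip_original_to_pytorch.py \
    #       --model_name instructblip-flan-t5-xl \
    #       --pytorch_dump_folder_path ./instructblip-flan-t5-xl-hf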
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Helper that builds tiny configs and inputs for the OpenAI GPT tests below."""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def __A ( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
        '''simple docstring'''
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 0 |
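

# A minimal usage sketch of the same greedy generation outside the test harness.
# This assumes network access to the `openai-gpt` checkpoint; the class names are
# standard transformers classes, everything else is illustrative:
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
#   print(tokenizer.decode(output_ids[0]))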
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 711 |
def solution():
    """Product of the digits d(1) * d(10) * d(100) * ... * d(1000000) of
    Champernowne's constant 0.123456789101112... (Project Euler problem 40)."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )
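

# Worked example: the first digits of Champernowne's constant give d(1)=1, d(10)=1,
# d(100)=5, d(1000)=3, d(10000)=7, d(100000)=2 and d(1000000)=1, so the product
# returned above is 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.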
if __name__ == "__main__":
print(solution()) | 678 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
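

# For example, four images with rows=2 and cols=2 are pasted at boxes
# (0, 0), (w, 0), (0, h) and (w, h), yielding a single 2x2 contact sheet.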
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Optional[Any]=1 , snake_case_ : Optional[Any]=42 , ):
__magic_name__ = torch.Generator(pipeline.device ).manual_seed(snake_case_ )
__magic_name__ = pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
__magic_name__ = int(math.sqrt(snake_case_ ) )
__magic_name__ = image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1))) | 712 |
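

# Example invocation (hypothetical script name and checkpoint directory; the flags
# are the ones defined in parse_args above):
#   python generate_images.py -m ./sd-finetuned-model -c "robotic cat with wings" -n 4 -s 42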
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
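

# The loader above converts a pickled detectron-style checkpoint (numpy arrays stored
# under the "model" key) into an OrderedDict of torch tensors suitable for
# `load_state_dict`, asserting that every value is either an ndarray or a tensor.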
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += F'{t * (self._level)}{k}: {v} ({type(v).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
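

# A minimal sketch of how this Config tree is meant to behave (illustrative values only):
#
#   cfg = Config({"model": {"dim": 64}})
#   print(cfg.model.dim)            # 64 -- nested dicts become nested Config nodes
#   setattr(cfg, "model.dim", 128)  # dotted keys walk the tree, as in __setattr__ above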
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        f'{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
        " element-wise mismatch"
    )
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
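

# For a model id without a "/" the legacy flat layout is used, e.g.
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin", use_cdn=True)
# resolves to "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin".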
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
            if len(matching_files ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
        print(
            '''%s not found in cache or force_download set to True, downloading to %s''' % (snake_case_ , temp_file.name) )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
    __magic_name__ = sha256(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
        __magic_name__ = sha256(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
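

# Cache filenames are content-addressed: sha256(url), plus "." + sha256(etag) when an
# ETag is known, so a changed remote file (new ETag) maps to a fresh cache entry.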
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
    print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
        __magic_name__ = cv2.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
    __magic_name__ = cv2.cvtColor(snake_case_ , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 0 |
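

# e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]
# ("chunk" is an illustrative name for the batching generator defined above).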
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ", )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
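

# Worked example for the two converters above:
#   infix_2_postfix("a+b*c") -> "abc*+"   (also prints the intermediate table)
#   infix_2_prefix("a+b*c")  -> "+a*bc"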
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)') | 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
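

# The padding strategy above matters for performance: on TPU, variable sequence
# lengths trigger recompilation, so batches are padded to a fixed max_length;
# on GPU/CPU, padding to the longest sequence in each batch is cheaper.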
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resuming.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 0 |
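

# Example invocations (hypothetical script name and output directory):
#   python checkpointing_example.py --output_dir ./ckpts --num_epochs 2
#   python checkpointing_example.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0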
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NB: `worflow_run_id` (sic) is the parameter name exposed by get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts downloaded by `get_last_daily_ci_artifacts`."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
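

# Usage sketch (hypothetical artifact name; requires a GitHub token with read access
# to the repository's Actions artifacts):
#   reports = get_last_daily_ci_reports(["test_reports"], output_dir=".", token=os.environ["GITHUB_TOKEN"])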
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
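

# e.g. reverse_long_words("Hey wollef sroirraw") -> "Hey fellow warriors"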
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)
a_ : Optional[Any] = 'Hello world! cécé herlolip'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : bool ):
__magic_name__ = FairseqRobertaModel.from_pretrained(snake_case_ )
roberta.eval() # disable dropout
__magic_name__ = roberta.model.encoder.sentence_encoder
__magic_name__ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , snake_case_ )
__magic_name__ = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__magic_name__ = roberta_sent_encoder.embed_tokens.weight
__magic_name__ = roberta_sent_encoder.embed_positions.weight
__magic_name__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__magic_name__ = roberta_sent_encoder.layer_norm.weight
__magic_name__ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__magic_name__ = model.roberta.encoder.layer[i]
__magic_name__ = roberta_sent_encoder.layers[i]
__magic_name__ = layer.attention
__magic_name__ = roberta_layer.self_attn_layer_norm.weight
__magic_name__ = roberta_layer.self_attn_layer_norm.bias
# self attention
__magic_name__ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__magic_name__ = roberta_layer.self_attn.q_proj.weight
__magic_name__ = roberta_layer.self_attn.q_proj.bias
__magic_name__ = roberta_layer.self_attn.k_proj.weight
__magic_name__ = roberta_layer.self_attn.k_proj.bias
__magic_name__ = roberta_layer.self_attn.v_proj.weight
__magic_name__ = roberta_layer.self_attn.v_proj.bias
# self-attention output
__magic_name__ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__magic_name__ = roberta_layer.self_attn.out_proj.weight
__magic_name__ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__magic_name__ = roberta_layer.final_layer_norm.weight
__magic_name__ = roberta_layer.final_layer_norm.bias
# intermediate
__magic_name__ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        __magic_name__ = roberta_layer.fc1.weight
        __magic_name__ = roberta_layer.fc1.bias
# output
__magic_name__ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        __magic_name__ = roberta_layer.fc2.weight
        __magic_name__ = roberta_layer.fc2.bias
# end of layer
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''].dense.weight
__magic_name__ = roberta.model.classification_heads['''mnli'''].dense.bias
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.weight
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__magic_name__ = roberta.model.encoder.lm_head.dense.weight
__magic_name__ = roberta.model.encoder.lm_head.dense.bias
__magic_name__ = roberta.model.encoder.lm_head.layer_norm.weight
__magic_name__ = roberta.model.encoder.lm_head.layer_norm.bias
__magic_name__ = roberta.model.encoder.lm_head.weight
__magic_name__ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__magic_name__ = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
__magic_name__ = model(snake_case_ )[0]
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''](roberta.extract_features(snake_case_ ) )
else:
__magic_name__ = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
__magic_name__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__magic_name__ = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
a_ : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
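
# Example invocation (hypothetical script name and local paths for the fairseq
# checkpoint directory and the output directory):
#   python convert_xlm_roberta_xl.py --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted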
| 715 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __A ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
        __magic_name__ = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.dummy_uncond_unet
__magic_name__ = PNDMScheduler()
__magic_name__ = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , num_inference_steps=20 , output_type='''numpy''' ).images
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , num_inference_steps=20 , output_type='''numpy''' , return_dict=A )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = '''google/ddpm-cifar10-32'''
        __magic_name__ = UNet2DModel.from_pretrained(A )
__magic_name__ = PNDMScheduler()
__magic_name__ = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , output_type='''numpy''' ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 716 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 0 |
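
    # Hedged agreement check (restated inline, since the defs above are
    # corpus-renamed): both formulations compute the same L2 distance.
    va, vb = [1, 2, 3], [4, 5, 6]
    d_np = float(np.sqrt(np.sum((np.asarray(va) - np.asarray(vb)) ** 2)))
    d_py = sum((x - y) ** 2 for x, y in zip(va, vb)) ** 0.5
    assert abs(d_np - d_py) < 1e-12  # both are sqrt(27) ~= 5.196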
import requests
a_ : List[str] = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
# fetching a list of articles in json format
__magic_name__ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(f'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 717 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            __magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
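
# Hedged illustration of the block-index arithmetic used in the encoder branch
# of the renaming function above: two checkpoint sub-indices collapse into one
# downsample_block index via index = groups[2] * 2 + groups[3].
import re as _re_demo
_demo_groups = _re_demo.fullmatch(
    r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)",
    "encoders.0.level_blocks.1.model.2.3.weight",
).groups()
assert int(_demo_groups[2]) * 2 + int(_demo_groups[3]) == 7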
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = embedding_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = MegatronBertModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = MegatronBertForMaskedLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForCausalLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = MegatronBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForPreTraining(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForTokenClassification(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = MegatronBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
# test_resize_embeddings = False
_a = False
def __A ( self , A , A , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , hidden_size=37 )
def __A ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
return torch.tensor(
snake_case_ , dtype=torch.long , device=snake_case_ , )
a_ : List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__magic_name__ = os.path.join(os.environ['''MYDIR'''] , A )
__magic_name__ = MegatronBertModel.from_pretrained(A )
model.to(A )
model.half()
__magic_name__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__magic_name__ = model(A )[0]
__magic_name__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , A )
__magic_name__ = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
__magic_name__ = output[0, ii, jj]
__magic_name__ = expected[3 * ii + jj]
__magic_name__ = '''ii={} jj={} a={} b={}'''.format(A , A , A , A )
self.assertTrue(math.isclose(A , A , rel_tol=A , abs_tol=A ) , msg=A ) | 718 |
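
# Hedged restatement of the element-wise tolerance loop above: with
# TOLERANCE = 1e-4 (the module-level constant), the same check can be written
# as a single torch.allclose over the slice; demonstrated on synthetic data.
if is_torch_available():
    _a_demo = torch.randn(3, 3)
    _b_demo = _a_demo + 5e-5  # within the absolute tolerance
    assert torch.allclose(_a_demo, _b_demo, rtol=1e-4, atol=1e-4)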
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 0 |
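
# Hedged sketch via the public transformers class this module defines
# (TableTransformerConfig): the attribute_map above aliases hidden_size to
# d_model and num_attention_heads to encoder_attention_heads.
if __name__ == "__main__":
    from transformers import TableTransformerConfig
    _cfg = TableTransformerConfig()
    assert _cfg.hidden_size == _cfg.d_model == 256
    assert _cfg.num_attention_heads == _cfg.encoder_attention_heads == 8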
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
__magic_name__ = gray_code_sequence_string(snake_case_ )
# convert them to integers
for i in range(len(snake_case_ ) ):
__magic_name__ = int(sequence[i] , 2 )
return sequence
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# The approach is a recursive one
    # Base cases are reached when n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__magic_name__ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__magic_name__ = gray_code_sequence_string(bit_count - 1 )
__magic_name__ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__magic_name__ = '''0''' + smaller_sequence[i]
sequence.append(snake_case_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__magic_name__ = '''1''' + smaller_sequence[i]
sequence.append(snake_case_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod() | 719 |
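
    # Hedged property check: consecutive Gray codes differ in exactly one bit;
    # [0, 1, 3, 2, 6, 7, 5, 4] is the 3-bit sequence the recursion above builds.
    _seq = [0, 1, 3, 2, 6, 7, 5, 4]
    for _a, _b in zip(_seq, _seq[1:]):
        assert bin(_a ^ _b).count("1") == 1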
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 0 |
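
    # Hedged sketch of the same build-and-save path with a tiny config,
    # skipping the TF-weight load (which needs a real checkpoint); LxmertConfig
    # and LxmertForPreTraining are the public classes imported above.
    _tiny_config = LxmertConfig(hidden_size=64, num_attention_heads=4, l_layers=1, x_layers=1, r_layers=1)
    _tiny_model = LxmertForPreTraining(_tiny_config)
    torch.save(_tiny_model.state_dict(), 'lxmert_tiny.bin')  # mirrors the script's final step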
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = 42
@flax_register_to_config
class SCREAMING_SNAKE_CASE_ ( nn.Module , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = 32
_a = 4
_a = 4
_a = (
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""DownBlock2D""",
)
_a = ("""UpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""")
_a = False
_a = (320, 640, 1280, 1280)
_a = 2
_a = 8
_a = None
_a = 1280
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = False
def __A ( self , A ) -> FrozenDict:
'''simple docstring'''
__magic_name__ = (1, self.in_channels, self.sample_size, self.sample_size)
__magic_name__ = jnp.zeros(A , dtype=jnp.floataa )
__magic_name__ = jnp.ones((1,) , dtype=jnp.intaa )
__magic_name__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__magic_name__ , __magic_name__ = jax.random.split(A )
__magic_name__ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A , A , A , A )["params"]
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.block_out_channels
__magic_name__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ = FlaxTimestepEmbedding(A , dtype=self.dtype )
__magic_name__ = self.only_cross_attention
if isinstance(A , A ):
__magic_name__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A , A ):
__magic_name__ = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ = []
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ = FlaxCrossAttnDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__magic_name__ = FlaxDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A )
__magic_name__ = down_blocks
# mid
__magic_name__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__magic_name__ = []
__magic_name__ = list(reversed(A ) )
__magic_name__ = list(reversed(A ) )
__magic_name__ = list(reversed(A ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = reversed_block_out_channels[min(i + 1 , len(A ) - 1 )]
__magic_name__ = i == len(A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__magic_name__ = FlaxCrossAttnUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__magic_name__ = FlaxUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A )
__magic_name__ = output_channel
__magic_name__ = up_blocks
# out
__magic_name__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__magic_name__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A , A , A , A=None , A=None , A = True , A = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(A , jnp.ndarray ):
__magic_name__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A , jnp.ndarray ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps.astype(dtype=jnp.floataa )
__magic_name__ = jnp.expand_dims(A , 0 )
__magic_name__ = self.time_proj(A )
__magic_name__ = self.time_embedding(A )
# 2. pre-process
__magic_name__ = jnp.transpose(A , (0, 2, 3, 1) )
__magic_name__ = self.conv_in(A )
# 3. down
__magic_name__ = (sample,)
for down_block in self.down_blocks:
if isinstance(A , A ):
__magic_name__ , __magic_name__ = down_block(A , A , A , deterministic=not train )
else:
__magic_name__ , __magic_name__ = down_block(A , A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__magic_name__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
A , A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__magic_name__ = new_down_block_res_samples
# 4. mid
__magic_name__ = self.mid_block(A , A , A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__magic_name__ = down_block_res_samples[-(self.layers_per_block + 1) :]
__magic_name__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(A , A ):
__magic_name__ = up_block(
A , temb=A , encoder_hidden_states=A , res_hidden_states_tuple=A , deterministic=not train , )
else:
__magic_name__ = up_block(A , temb=A , res_hidden_states_tuple=A , deterministic=not train )
# 6. post-process
__magic_name__ = self.conv_norm_out(A )
__magic_name__ = nn.silu(A )
__magic_name__ = self.conv_out(A )
__magic_name__ = jnp.transpose(A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A ) | 720 |
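
# Hedged sketch: exercising the init_weights pattern above through the public
# diffusers class (FlaxUNet2DConditionModel) with a deliberately tiny config;
# that the parameter-tree keys follow the submodule attribute names is an
# assumption checked by the print.
if __name__ == "__main__":
    import jax
    from diffusers import FlaxUNet2DConditionModel
    _tiny_unet = FlaxUNet2DConditionModel(
        sample_size=16, in_channels=4, out_channels=4,
        block_out_channels=(32, 64), layers_per_block=1,
        attention_head_dim=2, cross_attention_dim=32,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    )
    _tiny_params = _tiny_unet.init_weights(jax.random.PRNGKey(0))
    print(sorted(_tiny_params.keys()))  # e.g. ['conv_in', 'conv_norm_out', ...]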
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 678 | 0 |
import argparse
import json
import subprocess
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] ):
__magic_name__ = []
__magic_name__ = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
__magic_name__ = subprocess.run(snake_case_ , shell=snake_case_ , stdout=subprocess.PIPE )
__magic_name__ = output.stdout.decode('''utf-8''' )
__magic_name__ = json.loads(snake_case_ )
__magic_name__ = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(snake_case_ )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(snake_case_ ) )
if len(snake_case_ ) > 0:
__magic_name__ = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
return values.split(''',''' )
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
a_ : int = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 721 |
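
    # Hedged offline check of the JSON shape the script expects from the GitHub
    # runners API (inferred from the parsing above), exercised without network
    # access.
    _payload = json.loads('{"runners": [{"name": "gpu-1", "status": "offline"}]}')
    _offline = [r['name'] for r in _payload['runners'] if r['status'] == 'offline']
    assert _offline == ['gpu-1']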
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 678 | 0 |
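
    # Hedged variant that drops the pre-sorted-by-finish-time assumption of the
    # greedy above: sort indices by finish time first, then apply the same rule.
    _start, _finish = [1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]
    _order = sorted(range(len(_finish)), key=lambda j: _finish[j])
    _selected, _last_finish = [], float('-inf')
    for j in _order:
        if _start[j] >= _last_finish:
            _selected.append(j)
            _last_finish = _finish[j]
    print(_selected)  # [0, 1, 3, 4] for the sample data above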
from random import shuffle
import tensorflow as tf
from numpy import array
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : str ):
__magic_name__ = int(snake_case_ )
assert noofclusters < len(snake_case_ )
# Find out the dimensionality
__magic_name__ = len(vectors[0] )
# Will help select random centroids from among the available vectors
__magic_name__ = list(range(len(snake_case_ ) ) )
shuffle(snake_case_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__magic_name__ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__magic_name__ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__magic_name__ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(snake_case_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__magic_name__ = tf.placeholder('''float64''' , [dim] )
__magic_name__ = []
for centroid in centroids:
cent_assigns.append(tf.assign(snake_case_ , snake_case_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__magic_name__ = [tf.Variable(0 ) for i in range(len(snake_case_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__magic_name__ = tf.placeholder('''int32''' )
__magic_name__ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(snake_case_ , snake_case_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__magic_name__ = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__magic_name__ = tf.reduce_mean(snake_case_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__magic_name__ = tf.placeholder('''float''' , [dim] )
__magic_name__ = tf.placeholder('''float''' , [dim] )
__magic_name__ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(snake_case_ , snake_case_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__magic_name__ = tf.placeholder('''float''' , [noofclusters] )
__magic_name__ = tf.argmin(snake_case_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__magic_name__ = tf.initialize_all_variables()
# Initialize all variables
sess.run(snake_case_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__magic_name__ = 100
for _ in range(snake_case_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(snake_case_ ) ):
__magic_name__ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__magic_name__ = [
sess.run(snake_case_ , feed_dict={va: vect, va: sess.run(snake_case_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__magic_name__ = sess.run(
snake_case_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(snake_case_ ):
# Collect all the vectors assigned to this cluster
__magic_name__ = [
vectors[i]
for i in range(len(snake_case_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__magic_name__ = sess.run(
snake_case_ , feed_dict={mean_input: array(snake_case_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__magic_name__ = sess.run(snake_case_ )
__magic_name__ = sess.run(snake_case_ )
return centroids, assignments
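
# Hedged NumPy restatement of the Expectation/Maximization loop above, free of
# the TF1 session machinery, to make the two alternating steps explicit.
def _kmeans_np(x, k, iters=100, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    cent = x[rng.choice(len(x), size=k, replace=False)].astype(float)
    assign = np.zeros(len(x), dtype=int)
    for _ in range(iters):
        # Expectation: assign each vector to its nearest centroid
        assign = np.argmin(((x[:, None, :] - cent[None, :, :]) ** 2).sum(-1), axis=1)
        # Maximization: move each centroid to the mean of its members
        for j in range(k):
            members = x[assign == j]
            if len(members):
                cent[j] = members.mean(axis=0)
    return cent, assign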
| 700 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 0 |
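
# Hedged round-trip check of the affine (de)normalization implemented by
# scale_features / scale_to_features above: [min_value, max_value] <-> [-1, 1].
_lo, _hi = math.log(1E-5), 4.0  # the class's min_value / max_value
_x = torch.tensor([_lo, 0.0, _hi])
_y = (_x - _lo) / (_hi - _lo) * 2.0 - 1.0        # scale_features, output_range=(-1, 1)
_x_back = (_y + 1.0) / 2.0 * (_hi - _lo) + _lo   # scale_to_features, input_range=(-1, 1)
assert torch.allclose(_x, _x_back)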
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = None , A = True , A = 1 / 2_55 , A = True , A = None , A = None , A = True , **A , ) -> None:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = size if size is not None else {'''shortest_edge''': 2_24}
__magic_name__ = get_size_dict(A , default_to_square=A )
__magic_name__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__magic_name__ = get_size_dict(A , default_to_square=A , param_name='''crop_size''' )
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = resample
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = do_normalize
__magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD
__magic_name__ = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> Any:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 701 |
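# Minimal preprocessing sketch for the CLIP-style image processor above. The
# concrete class name is obfuscated in this row, so the example goes through
# AutoImageProcessor with an illustrative public checkpoint instead:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(batch.pixel_values.shape)   # e.g. torch.Size([1, 3, 224, 224])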
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : List[str] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url : str ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict : dict ):
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name : str ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''patch_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
    return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model to hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
a_ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) | 702 |
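# How a converted DPT checkpoint is typically exercised afterwards. Hedged:
# "Intel/dpt-large" is the published hub id for this architecture, not an
# artifact produced by the script above.
#
#   from transformers import DPTForDepthEstimation, DPTImageProcessor
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   depth = model(**inputs).predicted_depth   # (1, H, W) relative depth map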
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name : str ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name : str ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 0 |
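# Shape sanity-check sketch for the SimMIM conversion above. Assumptions: the
# window-6 base variant (192x192 inputs), and `reconstruction` as the output
# field of SwinForMaskedImageModeling in recent transformers releases (older
# versions exposed `logits` instead).
#
#   import torch
#   dummy = torch.zeros(1, 3, 192, 192)
#   with torch.no_grad():
#       out = model(dummy)
#   assert out.reconstruction.shape == (1, 3, 192, 192)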
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : List[str] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 703 |
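# The import structure above defers heavy backends until first attribute
# access. A minimal, self-contained sketch of the same mechanism (the class
# and the example mapping below are illustrative, not transformers internals):
import importlib


class _LazyNamespaceSketch:
    def __init__(self, import_structure):
        # e.g. {"json": ["loads", "dumps"]} maps symbols to their home module
        self._structure = import_structure

    def __getattr__(self, name):
        for module, symbols in self._structure.items():
            if name in symbols:
                # import happens only now, on first use of the symbol
                return getattr(importlib.import_module(module), name)
        raise AttributeError(name)


# lazy = _LazyNamespaceSketch({"json": ["loads"]}); lazy.loads('{"a": 1}')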
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word : str ):
    return "".join(sorted(word ) )
def anagram( my_word : str ):
    return word_by_signature[signature(my_word )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 0 |
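# Quick sanity check for the helpers above (results depend on the bundled
# words.txt; the 'pots'/'stop' pair is illustrative):
#
#   >>> signature('pots')
#   'opst'
#   >>> 'stop' in anagram('pots')   # True whenever both words are in words.txt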
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size if size is not None else {'''height''': 18, '''width''': 20}
__magic_name__ = do_thumbnail
__magic_name__ = do_align_axis
__magic_name__ = do_pad
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
    def prepare_image_processor_dict( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_a = DonutImageProcessor if is_vision_available() else None
    def setUp( self ) -> Optional[Any]:
        '''simple docstring'''
        self.image_processor_tester = DonutImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Any:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''do_thumbnail''' ) )
self.assertTrue(hasattr(A , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
@is_flaky()
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , ) | 704 |
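# The three flaky cases above assert one and the same shape contract. A
# compact sketch of that check outside unittest (names are illustrative):
def _expected_batch_shape(tester, batched):
    return (
        tester.batch_size if batched else 1,
        tester.num_channels,
        tester.size['''height'''],
        tester.size['''width'''],
    )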
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    """simple docstring"""
    def __init__( self , list_of_points ) -> Tuple:
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t : float ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t : float ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size : float = 0.01 ) -> Tuple:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color='''red''' , label='''Control Points''' )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 0 |
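# One worked number for the curve class above: for the degree-2 curve through
# (0,0), (5,5), (5,0) the Bernstein weights at t=0.5 are (0.25, 0.5, 0.25), so
# the point is (0.25*0 + 0.5*5 + 0.25*5, 0.25*0 + 0.5*5 + 0.25*0) = (3.75, 2.5).
curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
print(curve.bezier_curve_function(0.5))  # (3.75, 2.5)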
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE_ ( SchedulerCommonTest ):
"""simple docstring"""
_a = (DPMSolverSDEScheduler,)
_a = 10
    def get_scheduler_config( self , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs )
        return config
def __A ( self ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __A ( self ) -> Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps , device=A )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**A , use_karras_sigmas=A )
scheduler.set_timesteps(self.num_inference_steps , device=A )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
__magic_name__ = sample.to(A )
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2 | 705 |
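# Standalone sketch of the loop these tests drive (requires torch + diffusers;
# the zero "model" below is a stand-in for a real UNet, purely illustrative):
#
#   import torch
#   from diffusers import DPMSolverSDEScheduler
#
#   sched = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
#   sched.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
#   for t in sched.timesteps:
#       scaled = sched.scale_model_input(sample, t)
#       model_out = torch.zeros_like(scaled)
#       sample = sched.step(model_out, t, scaled).prev_sample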
import re
def is_sri_lankan_phone_number( phone : str ):
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 0 |
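# A few extra checks for the validator above: valid numbers start with
# 07x, 947x, +947x or 00947x and carry seven further digits.
print(is_sri_lankan_phone_number('''+94773283048'''))  # True
print(is_sri_lankan_phone_number('''0718382399'''))    # True
print(is_sri_lankan_phone_number('''07533'''))         # False - too short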
def _SCREAMING_SNAKE_CASE ( txt : str ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod()
| 706 |
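# Example for the variant generator above: each alphabetic position yields one
# copy of the input with exactly that character upper-cased.
print(_SCREAMING_SNAKE_CASE('''abc'''))  # ['Abc', 'aBc', 'abC']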
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 0 |
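# What find_backend matches on, shown with a self-contained stand-in (the
# regexes are a simplification of the real helper in utils/check_dummies.py):
import re


def find_backend_sketch(line):
    if re.search(r"^\s*if not \(?is_\w+_available\(\)", line) is None:
        return None
    backends = re.findall(r"is_(\w+)_available", line)
    return "_and_".join(backends)


assert find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):") == "torch_and_transformers"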
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_00 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( OnnxConfig ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 707 |
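# Construction sketch for the config above. The class name is obfuscated in
# this row; in transformers the "table-transformer" model_type corresponds to
# TableTransformerConfig:
#
#   from transformers import TableTransformerConfig, TableTransformerForObjectDetection
#
#   config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
#   model = TableTransformerForObjectDetection(config)   # randomly initialised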
def depth_first_search( grid : list[list[int]] , row : int , col : int , visit : set ):
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 0 |
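# Worked example for depth_first_search above: count the simple paths from the
# top-left to the bottom-right corner of a small grid (1 marks a blocked cell).
# The open cells below form a ring, so exactly two routes exist.
example_grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(example_grid, 0, 0, set()))  # 2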
def manhattan_distance( point_a : list , point_b : list ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list[float] ):
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        '''Expected a list of numbers as input, found '''
                        f'{type(item ).__name__}'
                    )
                    raise TypeError(msg )
        else:
            msg = f'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg )
    else:
        raise ValueError('''Missing an input''' )
def manhattan_distance_one_liner( point_a : list , point_b : list ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
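# Tiny checks for the distance helpers above:
print(manhattan_distance([1, 1], [9, 9]))            # 16.0
print(manhattan_distance_one_liner([1, 1], [2, 2]))  # 2.0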
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion( value : float , from_type : str , to_type : str ):
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 0 |
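# Worked example for length_conversion above: kilometre has exponent 3 and
# metre exponent 0, so 4 km -> 4 * 10**3 m.
print(length_conversion(4, '''kilometer''', '''meter'''))      # 4000.0
print(length_conversion(1, '''meter''', '''kilometer'''))      # 0.001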
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , vocab_size=1_00 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ) -> Dict:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = vocab_size
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = num_patches + 1
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ) -> Any:
        '''simple docstring'''
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ) -> Union[str, Any]:
        '''simple docstring'''
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> List[str]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
_a = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def __A ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(A )
__magic_name__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = self._prepare_for_class(A , A )
__magic_name__ = model_class(A )
@jax.jit
def model_jitted(A , **A ):
return model(pixel_values=A , **A )
with self.subTest('''JIT Enabled''' ):
__magic_name__ = model_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__magic_name__ = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __A ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__magic_name__ = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__magic_name__ = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(A )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
        model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''np''' ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_96) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_96, 81_92)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
    @slow
    def __A ( self ) -> Dict:
        '''simple docstring'''
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 10_00)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.23_85, -1.09_87, -1.01_08] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2_81
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def __A ( self ) -> List[str]:
        '''simple docstring'''
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_18_41)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.68_81, -0.27_87, 0.59_01] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 23_96
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
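# Hedged usage sketch (not part of the test suite): the integration tests above reduce to this
# pattern; the checkpoint names come from the tests, everything else is the standard Flax API.
#   image_processor = BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' )
#   model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
#   inputs = image_processor(images=prepare_img() , return_tensors='''np''' )
#   print(model(**inputs ).logits.argmax(-1 ))  # -> 281 per the expected class index above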
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
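# Note: with the lazy module above, `from transformers.models.longt5 import LongT5Model` defers
# the torch-backed import until first attribute access; under TYPE_CHECKING the real imports run.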
def reverse_long_words( sentence : str ):
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
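# Example (assumed behavior, matching the logic above): words longer than 4 characters are
# reversed in place, e.g. reverse_long_words('Hey wollef sroirraw') == 'Hey fellow warriors'.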
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def __A ( self , config , input_ids , head_mask , token_type_ids , *args ) -> Tuple:
        '''simple docstring'''
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __A ( self , config , input_ids , head_mask , token_type_ids , *args ) -> Dict:
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __A ( self , config , input_ids , head_mask , token_type_ids , *args ) -> List[Any]:
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __A ( self , config , input_ids , head_mask , token_type_ids , *args ) -> Optional[int]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def __A ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
        '''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def __A ( self , inputs_dict , model_class , return_labels=False ) -> List[str]:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def __A ( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def __A ( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def __A ( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
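# Minimal standalone sketch of the integration check above (assumes the same `torch_device`;
# the prompt ids 4_81, 47_35, 5_44 decode to "the president is"):
#   model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ).to(torch_device )
#   prompt = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device )
#   print(model.generate(prompt , do_sample=False )[0].tolist() )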
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=str , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=int , default=5 )
    parser.add_argument('''--batch_size''' , type=int , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=int , default=1 )
    parser.add_argument('''--freeze''' , type=bool , default=True )
    parser.add_argument('''--learning_rate''' , type=float , default=5E-4 )
    parser.add_argument('''--seed''' , type=int , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=str , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=int , default=10 )
    parser.add_argument('''--weight_decay''' , type=float , default=0.01 )
    parser.add_argument('''--output_dir''' , type=str , default='''./results''' )
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics(eval_pred ):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback(TrainerCallback ):
    """simple docstring"""
    def __init__( self , trainer ) -> None:
        '''simple docstring'''
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['''test'''].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )
    print('''Loading tokenizer and model''' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['''src'''] , truncation=True , max_length=1024 )
        label = labels.str2int(example['''complexity'''] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['''train'''].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('''Training...''' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
    main()
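# Hypothetical invocation (script name is an example; argument names come from get_args above):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --freeze True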
def solution():
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )
if __name__ == "__main__":
    print(solution())
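# Why the indexing works: the joined string is the Champernowne constant "123456789101112...",
# so constant[0] is d1, constant[9] is d10, and so on; the product d1*d10*...*d1000000 is
# Project Euler problem 40's answer (210).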
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''' )[0].lower().strip() )
    return vg_classes, vg_attrs
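# Sketch (assumes the objects.txt / attributes.txt files shipped next to this module):
#   vg_classes, vg_attrs = load_labels()
#   print(len(vg_classes ) , len(vg_attrs ) )  # Visual Genome class / attribute vocabularies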
def load_checkpoint(ckp_path ):
    r = OrderedDict()
    with open(ckp_path , '''rb''' ) as f:
        ckp = pkl.load(f )['''model''']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def compare(in_tensor ):
    out_tensor = torch.load('''dump.pt''' , map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape , n1[0, 0, :5] )
    print(n2.shape , n2[0, 0, :5] )
    assert np.allclose(n1 , n2 , rtol=0.01 , atol=0.1 ), (
        f'{sum([1 for x in np.isclose(n1 , n2 , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %'
        " element-wise mismatch"
    )
raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url(url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id , filename , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return f'{endpoint}/{model_id}-{filename}'
    else:
        return f'{endpoint}/{model_id}/{filename}'
def http_get(url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    ua = '''python/{}'''.format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join('''{}/{}'''.format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='''B''' , unit_scale=True , total=total , initial=resume_size , desc='''Downloading''' , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache(url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get('''ETag''' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + '''.*''' )
                if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '''.lock'''
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '''.incomplete'''
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , '''a+b''' ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' % (url, temp_file.name) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {'''url''': url, '''etag''': etag}
        meta_path = cache_path + '''.json'''
        with open(meta_path , '''w''' ) as meta_file:
            json.dump(meta , meta_file )
return cache_path
def url_to_filename(url , etag=None ):
    url_bytes = url.encode('''utf-8''' )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''' )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5''' ):
        filename += ".h5"
    return filename
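# e.g. (hypothetical values) url_to_filename('''https://cdn.huggingface.co/x.h5''' , etag='''"abc"''' )
# returns '<sha256(url)>.<sha256(etag)>.h5'; the '.h5' suffix is kept so h5py can sniff the format.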
def cached_path(url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('''file {} not found'''.format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError('''unable to parse {} as a URL or as a local path'''.format(url_or_filename ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '''.lock'''
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , '''r''' ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError('''Archive format of {} could not be identified'''.format(output_path ) )
return output_path_extracted
return output_path
def get_data(query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split('''\n''' )
        req.close()
    return data
def get_image_from_url(url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url(url ):
    fn = url.split('''/''' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , '''rb''' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('''model''' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace('''running_var''' , '''num_batches_tracked''' )
            new[k2] = zero
    return new
def get_demo_path():
    # NOTE: assumes the module-level PATH constant defined above.
    print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def chunk(images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
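# Example: list(chunk([1, 2, 3, 4, 5] , batch=2 )) == [[1, 2], [3, 4], [5]]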
from scipy.stats import pearsonr
import datasets
a_ : Any = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
a_ : str = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
a_ : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def __A ( self , predictions , references , return_pvalue=False ) -> Union[str, Any]:
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name_or_path: str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator , model , eval_dataloader , metric ):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('''glue''' , '''mrpc''' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('''epoch_''' )[1]
        state_epoch_num = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('''resumed checkpoint performance:''' , accuracy )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
            resumed_state = json.load(f )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state['''accuracy'''] = accuracy
        state['''lr'''] = lr_scheduler.get_lr()[0]
        state['''optimizer_lr'''] = optimizer.param_groups[0]['''lr''']
        state['''epoch'''] = epoch
        state['''overall_step'''] = overall_step
        accelerator.print(f'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
                json.dump(state , f )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--partial_train_epoch''' , type=int , default=None , help='''If passed, the training will stop after this number of epochs.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=2 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
    main()
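# Hypothetical launch (config file and script names are examples; any DeepSpeed-enabled
# accelerate config works):
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints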
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution():
    return sum(
        number
        for number in range(1000 , 100_0000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
    print(solution())
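# Worked example: 4150 qualifies because 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.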
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( DiffusionPipeline ):
"""simple docstring"""
_a = ["""melgan"""]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ) -> None:
        '''simple docstring'''
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_28
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def __A ( self , features , output_range=(-1.0, 1.0) , clip=False ) -> List[Any]:
        '''simple docstring'''
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def __A ( self , outputs , input_range=(-1.0, 1.0) , clip=False ) -> Optional[int]:
        '''simple docstring'''
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def __A ( self , input_tokens , continuous_inputs , continuous_mask ) -> Union[str, Any]:
        '''simple docstring'''
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def __A ( self , encodings_and_masks , input_tokens , noise_time ) -> Optional[int]:
        '''simple docstring'''
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self , input_tokens , generator=None , num_inference_steps=1_00 , return_dict=True , output_type="numpy" , callback=None , callback_steps=1 , ) -> Union[AudioPipelineOutput, Tuple]:
        '''simple docstring'''
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info('''Generated segment''' , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 0 |
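
# A minimal usage sketch (illustrative; it assumes the `mauve-text` and `faiss`
# packages are installed, and that `preds` / `refs` are lists of strings, which
# are not defined in this module):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(predictions=preds, references=refs, featurize_model_name="gpt2")
#     print(out.mauve, out.frontier_integral)
#
# Precomputed GPT-2 features can also be scored directly via the `p_features` /
# `q_features` arguments of `_compute` above, skipping featurization.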
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE_ ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01], [0.00_02, 0.0_02, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934E-07) < 1E-2
            assert abs(result_mean.item() - 6.1112E-10) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972E-07) < 1E-2
            assert abs(result_mean.item() - 0.00_02) < 1E-3
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25) < 1E-2
            assert abs(result_mean.item() - 0.02_66) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1E-2
            assert abs(result_mean.item() - 0.02_66) < 1E-3
    def test_full_loop_device(self):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith('''cpu'''):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25) < 1E-2
            assert abs(result_mean.item() - 0.02_66) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1E-2
            assert abs(result_mean.item() - 0.02_66) < 1E-3
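
# Illustrative sketch (not part of the original test file): the same denoising
# loop the tests above exercise, factored into a standalone helper. `model` is
# assumed to be any callable returning a prediction shaped like `sample`.
def _example_sampling_loop(model, sample, num_inference_steps=10):
    scheduler = KDPMaDiscreteScheduler(num_train_timesteps=11_00, beta_schedule='''linear''')
    scheduler.set_timesteps(num_inference_steps)
    # Scale the initial noise to the scheduler's expected sigma, then denoise.
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        model_output = model(scaled, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample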
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 0 |
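
# Illustrative extension (not in the original module): with NumPy broadcasting
# the same formula vectorizes over many points at once; `points` is assumed to
# be an (n, d) array and `query` a (d,) array.
def pairwise_euclidean_distances(points: np.ndarray, query: np.ndarray) -> np.ndarray:
    # Subtract the query row-wise, square, sum over the feature axis, then root.
    return np.sqrt(np.sum((points - query) ** 2, axis=1))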
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : List[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE_ ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
_a = """focalnet"""
    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[1_92, 3_84, 7_68, 7_68],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
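
# Usage sketch (illustrative; upstream this class is exported as
# `FocalNetConfig`, here it keeps the local name). `out_features` is validated
# against the derived `stage_names`:
#
#     config = SCREAMING_SNAKE_CASE_(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]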
| 717 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
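
# Worked examples of the renaming above (illustrative, derived from the
# branches; not executed at import time):
#
#     replace_key("vqvae.bottleneck.level_blocks.0.k")  # -> "vqvae.bottleneck.level_blocks.0.codebook"
#     replace_key("prime_state_proj.weight")            # -> "encoder.proj_in.weight"
#     replace_key("prior.x_out.bias")                   # -> "prior.fc_proj_out.bias"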
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''')
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''')
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value
return new_dict
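
# Example of the index arithmetic above (illustrative): for the checkpoint key
# "encoders.0.level_blocks.1.model.2.3.weight", `re_encoder_block_conv_in`
# captures ("0", "1", "2", "3", "weight"), so block_index = 2 * 2 + 3 = 7 and
# the key becomes "encoders.0.level_blocks.1.downsample_block.7.weight".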
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', '''wb''').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('''/''')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}')['''model''']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b'''):
                new_dic[k.replace('''b''', '''bias''')] = old_dic[k]
            elif k.endswith('''.w'''):
                new_dic[k.replace('''w''', '''weight''')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''', '''.model.''')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = '''vqvae''' if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', '''w''') as txtfile:
        json.dump(mapping, txtfile)

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 0 |
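
# Example invocation (illustrative; assumes this script is saved as
# convert_jukebox.py, a placeholder filename, with paths as placeholders):
#
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path jukebox-1b-lyrics-converted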
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073), (0.2686_2954, 0.2613_0258, 0.2757_7711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
return image
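
# Shape sketch (illustrative): load_demo_image(image_size=384, device="cpu")
# returns a normalized float tensor of shape (1, 3, 384, 384), i.e. the
# (batch, channels, height, width) layout the BLIP vision encoder expects.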
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''', '''vision_model.encoder''', key)
    if "blocks" in key:
        key = re.sub(r'''blocks''', '''layers''', key)
    if "attn" in key:
        key = re.sub(r'''attn''', '''self_attn''', key)
    if "norm1" in key:
        key = re.sub(r'''norm1''', '''layer_norm1''', key)
    if "norm2" in key:
        key = re.sub(r'''norm2''', '''layer_norm2''', key)
    if "encoder.norm" in key:
        key = re.sub(r'''encoder.norm''', '''post_layernorm''', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'''encoder.pos_embed''', '''embeddings.position_embedding''', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'''encoder.cls_token''', '''embeddings.class_embedding''', key)
    if "self_attn" in key:
        key = re.sub(r'''self_attn.proj''', '''self_attn.projection''', key)
return key
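
# Worked example (illustrative): the substitutions above compose, e.g.
#
#     rename_key("visual_encoder.blocks.0.attn.proj.weight")
#     # -> "vision_model.encoder.layers.0.self_attn.projection.weight"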
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='''base''')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='''cpu''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    input_ids = tokenizer(['''a picture of''']).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='''base''')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question, return_tensors='''pt''').input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''')

    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='''base''')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question, return_tensors='''pt''', padding='''max_length''', truncation=True, max_length=35, ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
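
# Example invocation (illustrative; assumes this script is saved as
# convert_blip_original_pytorch_to_hf.py, a placeholder filename):
#
#     python convert_blip_original_pytorch_to_hf.py \
#         --pytorch_dump_folder_path blip-base-converted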
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=1_00,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig ( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1E-5
@property
    def default_onnx_opset(self) -> int:
'''simple docstring'''
return 12 | 678 | 0 |
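
# Usage sketch (illustrative; upstream these classes are exported as
# `TableTransformerConfig` and `TableTransformerOnnxConfig`). The
# `attribute_map` above aliases the common names onto DETR-style fields:
#
#     config = SCREAMING_SNAKE_CASE_()
#     assert config.num_attention_heads == 8  # alias for encoder_attention_heads
#     assert config.hidden_size == 256        # alias for d_model
#     onnx_config = TableTransformerOnnxConfig.from_model_config(config)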