from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
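# The modeling objects below are only added to the import structure when torch
# is available; `_LazyModule` then resolves them on first attribute access
# instead of importing everything eagerly.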
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
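    # `metas` is the metadata dict JukeboxTokenizer expects: artist, genre, and
    # the full lyrics (Shelley's "Ozymandias") to be tokenized.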
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Check the 1b-lyrics tokenizer output against fixed expected token ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Check the 5b-lyrics tokenizer output against fixed expected token ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
"""Brute-force decryption of a Caesar cipher over the uppercase alphabet."""
import string


def decrypt(message: str) -> None:
    """Try every possible key and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
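# Minimal sketch (not part of the original script; the shard filename below is
# illustrative): the TFRecord shards written by `main` can be read back with
# tf.data, e.g.
#
#   feature_description = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
#   ds = ds.map(lambda x: tf.io.parse_single_example(x, feature_description))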
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        args.limit = min(len(dataset), args.limit)
        dataset = dataset.select(range(args.limit))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
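# For i == 0, the first rule above maps, e.g.,
# "transformer.blocks.0.norm1.weight" -> "vilt.encoder.layer.0.layernorm_before.weight".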
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
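# The mid-run save/reload above round-trips the scheduler through
# `state_dict()`/`load_state_dict()`; the learning rates it returns should be
# identical to an uninterrupted `unwrap_schedule` run.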
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # scheds dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a schedule lambda so that the wrapped scheduler stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
"""Convert the original OpenAI Jukebox checkpoints into the HuggingFace format."""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
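# Each entry lists the original checkpoint files for one model: the shared
# VQ-VAE plus the three prior levels, with only the top-level prior differing
# between the 1b and 5b lyrics models.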
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
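# Illustrative example: replace_key_with_offset("network.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# rewrites the "2.0.mlp.fc1" portion to "block.1.0.output.conv1".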
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
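# e.g. running `RUN_SLOW=yes pytest ...` flips `_run_slow_tests` below and
# enables every test decorated with `@slow`.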
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=yes is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase class that keeps a single `tempfile.TemporaryDirectory` open
    for the duration of the class, wipes its contents before each test, and
    deletes it after the last test."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Creates a temporary directory shared by all tests in the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Removes the temporary directory after the last test."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Wipes the contents of `self.tmpdir` before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state between tests."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase class that starts the passed mocks before each test and
    automatically stops them afterwards via `addCleanup`."""

    def setUp(self, mocks=None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
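# `are_the_same_tensors` gathers the local tensor from every process and
# checks that every gathered copy matches the local one, i.e. the tensor is
# identical across the whole distributed group.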
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
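# Example usage (a minimal sketch; the launcher and script path are placeholders):
# result = execute_subprocess_async(
#     ["torchrun", "--nproc_per_node=2", "tests/test_script.py"], env=os.environ.copy()
# )
# print("\n".join(result.stdout))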
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
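# Example usage (a minimal sketch; `dataset` and `preprocess` are placeholders):
# ds = PipelineDataset(dataset, preprocess, {"truncation": True})
# first = ds[0]  # equivalent to preprocess(dataset[0], truncation=True)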
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item at `_loader_batch_index` within the cached batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
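# In short: when `loader_batch_size` is set, one DataLoader batch is inferred in a
# single forward pass, cached in `_loader_batch_data`, and then handed out item by
# item with batch_size=1 shapes, so downstream postprocessing never sees real batches.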
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started unrolling the first item yet.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
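# The flattening described above, in plain generator form (a sketch):
# def flatten(iterator, infer):
#     for item in iterator:
#         yield from infer(item)  # each infer(item) is itself an iterator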
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Very similar unpacking to PipelineIterator, BUT items carry an extra
        # `is_last` flag and are accumulated into one list per logical input.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 329 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices [i, j] with nums[i] + nums[j] == target.

    Assumes ``nums`` is sorted in ascending order; the two-pointer scan below
    relies on that ordering. Returns [] when no pair sums to ``target``.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
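# Note: the two-pointer scan assumes sorted input. For unsorted input a one-pass
# hash map is the usual alternative (a sketch):
# def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
#     seen: dict[int, int] = {}
#     for idx, value in enumerate(nums):
#         if target - value in seen:
#             return [seen[target - value], idx]
#         seen[value] = idx
#     return []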
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = '''data2vec-text'''
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='''gelu''', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='''absolute''', use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
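# Example usage (a minimal sketch):
# config = Data2VecTextConfig()
# onnx_config = Data2VecTextOnnxConfig(config)
# print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes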
| 329 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds small DPT configs and random inputs for the model tests below."""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[int]=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[1, 384, 24, 24] , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=None , ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = backbone_out_indices
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = backbone_featmap_shape
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """DPT does not use input_ids, inputs_embeds, attention_mask or seq_length."""
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
__SCREAMING_SNAKE_CASE = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
__SCREAMING_SNAKE_CASE = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
__SCREAMING_SNAKE_CASE = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__SCREAMING_SNAKE_CASE = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__SCREAMING_SNAKE_CASE = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = """add"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained Intel/dpt-hybrid-midas checkpoint."""
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = outputs.predicted_depth
# verify the predicted depth
__SCREAMING_SNAKE_CASE = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
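# To run only this module (sketch; the repo-relative test path is an assumption):
# python -m pytest tests/models/dpt/test_modeling_dpt.py -q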
| 267 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 329 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"""{self.data}"""


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('''pop from empty stack''')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('''peek from empty stack''')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
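# Example usage (a minimal sketch):
# stack: LinkedStack[int] = LinkedStack()
# for value in (1, 2, 3):
#     stack.push(value)
# print(stack)         # 3->2->1
# print(stack.pop())   # 3
# print(stack.peek())  # 2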
| 117 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''Yield successive Fibonacci numbers, one per call to next().'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    '''Return the index of the first Fibonacci term to contain ``n`` digits.'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
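# Sanity check: the first Fibonacci term with 3 digits is F(12) = 144, so
# solution(3) == 12; Project Euler problem 25 asks for solution(1000) (= 4782).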
| 329 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no PyTorch counterpart."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers')] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv')] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=2_0_0, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}')
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
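# Example invocation (sketch; file names are placeholders):
# python convert_speech_to_text_fairseq_to_tfms.py --fairseq_path s2t.pt --pytorch_dump_folder_path ./s2t-model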
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
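# Example usage (a minimal sketch; the package import path and config kwargs are
# assumptions — adjust to where this module actually lives):
# from emmental import MaskedBertConfig, MaskedBertModel
# config = MaskedBertConfig(pruning_method="topK")
# model = MaskedBertModel(config)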
| 187 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
    @is_flaky()
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
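# The three @is_flaky tests above run the same shape checks for PIL, NumPy and torch
# inputs. An illustrative invocation (the test path is an assumption):
#   python -m pytest tests/models/donut/test_image_processing_donut.py -k "call"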
| 329 | 0 |
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
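    # Illustrative check (added): 25 = 0b011001 and 32 = 0b100000 differ in four
    # bit positions, so their XOR is 0b111001.
    assert binary_xor(25, 32) == "0b111001"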
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase: List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """simple docstring"""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """simple docstring"""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
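# Minimal usage sketch for the pipeline above (illustrative; the checkpoint name is an
# assumption and running this would download a CLAP model):
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])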
| 227 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'''
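# For example, hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
# yields "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv",
# since quote() percent-encodes the blanks.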
| 329 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
lowercase__ : Dict = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[str] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[Any]="<s>" , lowerCAmelCase__ : Any="</s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : Tuple="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=False , **lowerCAmelCase__ : List[str] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
_UpperCamelCase = legacy_behaviour
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
_UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
_UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCamelCase = src_lang if src_lang is not None else '''eng_Latn'''
_UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
_UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Any ) -> None:
'''simple docstring'''
_UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case__ ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , **lowerCAmelCase__ : Dict ) -> Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_UpperCamelCase = src_lang
_UpperCamelCase = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCamelCase = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
_UpperCamelCase = tgt_lang_id
return inputs
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] = "eng_Latn" , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : Any = "fra_Latn" , **lowerCAmelCase__ : Union[str, Any] , ) -> BatchEncoding:
'''simple docstring'''
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case__ ( self : str , lowerCAmelCase__ : Optional[Any] ) -> None:
'''simple docstring'''
_UpperCamelCase = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
_UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case__ ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> None:
'''simple docstring'''
_UpperCamelCase = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
_UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
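# Illustrative use of the fast tokenizer above (checkpoint taken from the pretrained map;
# running this downloads tokenizer files):
# tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# batch = tok("Hello world", return_tensors="pt")  # input gets prefixed with the eng_Latn code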
| 324 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint: str = None) -> None:
    '''simple docstring'''
    require_version(deps[pkg], hint)
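# Illustrative call (assumes the package is listed in the deps table, as "tqdm" is above):
# dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range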
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase : List[str] = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    '''simple docstring'''
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
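    # Expected behaviour (illustrative): every node is reachable from "A", so the
    # printed set is {"A", "B", "C", "D", "E", "F", "G"}.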
| 329 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """simple docstring"""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsWarper:
    """simple docstring"""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsProcessorList(list):
    """simple docstring"""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys())} for '
                        f'{processor.__class__} are passed to the logits processor.'
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """simple docstring"""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """simple docstring"""

    def __init__(self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """simple docstring"""

    def __init__(self, top_k: int, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """simple docstring"""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float('inf')),
                    scores_k.at[: self.eos_token_id].set(-float('inf')),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float('inf')),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float('inf')),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
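# Illustrative composition of the processors above, mirroring how generation code
# chains them (input_ids, scores and cur_len are assumed to exist with the right shapes):
# processors = FlaxLogitsProcessorList()
# processors.append(FlaxTemperatureLogitsWarper(0.7))
# processors.append(FlaxTopKLogitsWarper(top_k=50))
# scores = processors(input_ids, scores, cur_len=cur_len)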
| 208 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 329 | 0 |
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
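# Example (added): the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum to 28,
# so 28 is a perfect number.
assert sum_of_proper_divisors(28) == 28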
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """simple docstring"""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one'
                )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """simple docstring"""
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt)} - but expected a single string. '''
                    'Note also that one single text can be provided for conditional image to text generation.'
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'input_ids': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'], list)
            and all(x is None for x in model_inputs['input_ids'])
        ):
            model_inputs['input_ids'] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
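# Minimal usage sketch for the pipeline above (illustrative; the checkpoint name is an assumption):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("photo.jpg")  # -> [{"generated_text": "..."}]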
| 329 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
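# Illustrative subclass (all names below are assumptions, not part of the original sample):
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         env_parser = parser.add_parser("env")
#         env_parser.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print("environment info")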
| 36 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}'''
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
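# Illustrative usage (argument names and the version string are assumptions):
# def resize(image, size=None, **kwargs):
#     if "scale" in kwargs:
#         size = deprecate("scale", "1.0.0", "Use `size` instead.", take_from=kwargs)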
| 329 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig(PretrainedConfig):
    """Configuration for the MRA model."""

    model_type = 'mra'

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
                 position_embedding_type='absolute', block_per_row=4, approx_mode='full',
                 initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
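# Usage sketch (added): instantiate with defaults and override a field; the
# model_type and attribute names follow the class above.
#
#     config = MraConfig(max_position_embeddings=1_024)
#     assert config.model_type == 'mra' and config.num_hidden_layers == 12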
| 267 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
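# Added note: each colour is absent from a 20-ball draw with probability
# comb(60, 20) / comb(70, 20), so by linearity of expectation the result is
# 7 * (1 - comb(60, 20) / comb(70, 20)) ≈ 6.818741802.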
| 329 | 0 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate an ASCII message into Morse code."""
    return ' '.join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a space-separated Morse string back into text."""
    return ''.join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
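# Round-trip sanity check (added for illustration):
#
#     assert decrypt(encrypt('SOS')) == 'SOS'   # encrypt('SOS') == '... --- ...'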
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
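# Pattern note (added): _LazyModule defers the torch-gated imports above until an
# attribute is first accessed, so importing the package stays cheap even when the
# optional backend is missing; the TYPE_CHECKING branch only serves type checkers.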
| 329 | 0 |
'''simple docstring'''
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat `key` until it is as long as `message`."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` by shifting each letter back by the key letter's index."""
    cipher = ''
    i = 0
    for letter in message:
        if letter == ' ':
            cipher += ' '
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Invert cipher_text by shifting each letter forward by the key letter's index."""
    or_txt = ''
    i = 0
    for letter in cipher:
        if letter == ' ':
            or_txt += ' '
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f'Encrypted Text = {s}')
    print(f'Original Text = {original_text(s, key_new)}')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
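# Worked example (added): generate_key('THE GERMAN ATTACK', 'SECRET') extends the
# key to 'SECRETSECRETSECRE'; since decryption adds back exactly what encryption
# subtracted (mod 26), original_text(cipher_text(m, k), k) == m for uppercase m.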
| 161 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Port a T5X (JAX) checkpoint into a Flax transformers model."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with'
            ' `encoder_attention_type` attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
        _UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale']
    _UpperCAmelCase = tax_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
_UpperCAmelCase = tax_model['target']['token_embedder']['embedding']
    _UpperCAmelCase = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a__ )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
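# Invocation sketch (added; paths are placeholders and the config name is just one
# valid example):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_name google/long-t5-local-base \
#         --flax_dump_folder_path /path/to/flax_dump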
| 329 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
'''simple docstring'''
    def test_map_nested(self):
"""simple docstring"""
snake_case_ = {}
snake_case_ = []
snake_case_ = 1
snake_case_ = [1, 2]
snake_case_ = {"a": 1, "b": 2}
snake_case_ = {"a": [1, 2], "b": [3, 4]}
snake_case_ = {"a": {"1": 1}, "b": 2}
snake_case_ = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ = {}
snake_case_ = []
snake_case_ = 2
snake_case_ = [2, 3]
snake_case_ = {"a": 2, "b": 3}
snake_case_ = {"a": [2, 3], "b": [4, 5]}
snake_case_ = {"a": {"1": 2}, "b": 3}
snake_case_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
snake_case_ = 2
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
snake_case_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
snake_case_ = {"a": 2, "b": 0, "c": 2}
snake_case_ = {
"a": np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ),
"b": np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ),
"c": np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda __lowercase : x + 1 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE )
    def test_zip_dict(self):
"""simple docstring"""
snake_case_ = {"a": 1, "b": 2}
snake_case_ = {"a": 3, "b": 4}
snake_case_ = {"a": 5, "b": 6}
snake_case_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
    def test_temporary_assignment(self):
"""simple docstring"""
        class Foo:
            my_attr = 'bar'
snake_case_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(_SCREAMING_SNAKE_CASE , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch('datasets.utils.py_utils._single_map_nested') as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool'
    ) as mock_multiprocessing_pool:
        data_struct = {f'{i}': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
'''simple docstring'''
@require_tf
    def test_tensorflow(self):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
snake_case_ = layers.Dense(2 )
def gen_random_output():
snake_case_ = tf.random.uniform((1, 3) )
return model(_SCREAMING_SNAKE_CASE ).numpy()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
snake_case_ = gen_random_output()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
snake_case_ = gen_random_output()
snake_case_ = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
    def test_torch(self):
"""simple docstring"""
import torch
def gen_random_output():
snake_case_ = torch.nn.Linear(3 , 2 )
snake_case_ = torch.rand(1 , 3 )
return model(_SCREAMING_SNAKE_CASE ).detach().numpy()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
snake_case_ = gen_random_output()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
snake_case_ = gen_random_output()
snake_case_ = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def test_numpy(self):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
snake_case_ = gen_random_output()
with temp_seed(42 ):
snake_case_ = gen_random_output()
snake_case_ = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1, y='foobar')
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input_) == expected_output

    input_ = {'a': {'b': A(x=10, y='foo')}, 'c': [A(x=20, y='bar')]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(input_) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y='foo')])
def _split_text(text: str):
    return text.split()


def _aseconds_generator_of_aitems_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'content': 'a'}, {'content': 'b'}]
        ):
            assert yield_time < time.time() + 0.1, 'we should get each item directly after it is yielded'
            out.append(content)
        assert out.count('a') == 2
        assert out.count('b') == 2
        assert len(out) == 4
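# Behaviour note (added): map_nested applies a function leaf-wise while keeping the
# container shape, e.g. map_nested(lambda x: x + 1, {'a': [1, 2], 'b': 3}) returns
# {'a': [2, 3], 'b': 4}; per the parametrized test above, it only spawns worker
# processes when num_proc > 1 and the input has at least parallel_min_length items,
# capping the pool at the number of items.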
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16,
                 resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
                 use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
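# Usage sketch (added): the attribute_map above lets framework code read canonical
# names while CTRL keeps its historical field names.
#
#     config = CTRLConfig(n_embd=1_280)
#     assert config.hidden_size == 1_280  # resolved through attribute_map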
| 329 | 0 |
import datasets
UpperCAmelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
UpperCAmelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
UpperCAmelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        return {'accuracy': simple_accuracy(predictions, references)}
| 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
    def test_image_processor_from_dict_with_kwargs(self):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
    def test_call_pil(self):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
| 329 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _SCREAMING_SNAKE_CASE , )
a = kwargs.pop("feature_extractor" )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
a = tokenizer
a = AutoTokenizer.from_pretrained("gpt2" )
a = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
a = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
a = self.char_tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
a = encodings["input_ids"]
return inputs
    def batch_decode(self, sequences):
"""simple docstring"""
a , a , a = sequences
a = char_preds.size(0 )
a , a = self._decode_helper(_SCREAMING_SNAKE_CASE , "char" )
a , a = self._decode_helper(_SCREAMING_SNAKE_CASE , "bpe" )
a , a = self._decode_helper(_SCREAMING_SNAKE_CASE , "wp" )
a = []
a = []
for i in range(_SCREAMING_SNAKE_CASE ):
a = [char_scores[i], bpe_scores[i], wp_scores[i]]
a = [char_strs[i], bpe_strs[i], wp_strs[i]]
a = scores.index(max(_SCREAMING_SNAKE_CASE ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
a = {}
a = final_strs
a = final_scores
a = char_strs
a = bpe_strs
a = wp_strs
return out
    def _decode_helper(self, pred_logits, format):
"""simple docstring"""
if format == DecodeType.CHARACTER:
a = self.char_decode
a = 1
a = "[s]"
elif format == DecodeType.BPE:
a = self.bpe_decode
a = 2
a = "#"
elif format == DecodeType.WORDPIECE:
a = self.wp_decode
a = 102
a = "[SEP]"
else:
raise ValueError(F'''Format {format} is not supported.''' )
a , a = [], []
a = pred_logits.size(0 )
a = pred_logits.size(1 )
a , a = pred_logits.topk(1 , dim=-1 , largest=_SCREAMING_SNAKE_CASE , sorted=_SCREAMING_SNAKE_CASE )
a = preds_index.view(-1 , _SCREAMING_SNAKE_CASE )[:, 1:]
a = decoder(_SCREAMING_SNAKE_CASE )
a , a = torch.nn.functional.softmax(_SCREAMING_SNAKE_CASE , dim=2 ).max(dim=2 )
a = preds_max_prob[:, 1:]
for index in range(_SCREAMING_SNAKE_CASE ):
a = preds_str[index].find(_SCREAMING_SNAKE_CASE )
a = preds_str[index][:pred_eos]
a = preds_index[index].cpu().tolist()
a = pred_index.index(_SCREAMING_SNAKE_CASE ) if eos_token in pred_index else -1
a = preds_max_prob[index][: pred_eos_index + 1]
a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_SCREAMING_SNAKE_CASE )
conf_scores.append(_SCREAMING_SNAKE_CASE )
return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
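# Usage sketch (illustrative; the checkpoint name and the surrounding model code
# are assumptions, not part of the original file):
#
#     processor = MgpstrProcessor.from_pretrained('alibaba-damo/mgp-str-base')
#     inputs = processor(images=image, return_tensors='pt')
#     outputs = model(**inputs)
#     text = processor.batch_decode(outputs.logits)['generated_text']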
| 227 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = 32
_UpperCamelCase = 2
_UpperCamelCase = 4
_UpperCamelCase = 37
_UpperCamelCase = '''gelu'''
_UpperCamelCase = 0.1
_UpperCamelCase = 0.1
_UpperCamelCase = 512
_UpperCamelCase = 16
_UpperCamelCase = 2
_UpperCamelCase = 0.02
_UpperCamelCase = 3
_UpperCamelCase = 4
_UpperCamelCase = None
    def prepare_config_and_inputs(self):
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
_UpperCamelCase = TFDistilBertModel(config=_SCREAMING_SNAKE_CASE )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
_UpperCamelCase = TFDistilBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
_UpperCamelCase = TFDistilBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDistilBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFDistilBertForMultipleChoice(_SCREAMING_SNAKE_CASE )
_UpperCamelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDistilBertForTokenClassification(_SCREAMING_SNAKE_CASE )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
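

# Minimal manual sketch of the integration check above (not part of the test
# suite): it assumes TensorFlow is installed and the "distilbert-base-uncased"
# checkpoint can be downloaded from the Hub.
if __name__ == "__main__":
    model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
    output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(output.shape)  # expected: (1, 6, 768)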
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
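

# Illustrative round-trip helper (not part of the original script): parses one
# serialized `tf.train.Example` produced above back into dense tensors. The
# feature spec mirrors the keys written in `get_serialized_examples`, and
# `max_length` is assumed to match the value used when the shards were written.
def parse_serialized_example(serialized_example, max_length):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    return tf.io.parse_single_example(serialized_example, feature_spec)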
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
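

# Sketch of how the written shards can be read back for training (illustrative,
# not part of the original script): `tf.data.TFRecordDataset` accepts local
# paths as well as `gs://` URLs, and `parse_serialized_example` is the helper
# defined above.
def load_tfrecord_dataset(shard_paths, max_length):
    dataset = tf.data.TFRecordDataset(shard_paths)
    return dataset.map(lambda record: parse_serialized_example(record, max_length))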
if __name__ == "__main__":
    args = parse_args()
    main(args)
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
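

# Quick manual smoke test (illustrative; assumes it is run from the root of the
# transformers repository so the `tests` package is importable -- the BERT test
# file below is just an example path):
if __name__ == "__main__":
    bert_test_file = "tests/models/bert/test_modeling_bert.py"
    print(to_json(get_model_to_tester_mapping(bert_test_file)))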
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
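

# Standalone illustration (assumes torch and transformers are installed): a
# linear warmup/decay schedule over 10 steps first climbs to the base LR and
# then decays linearly, matching the expectations encoded in the test above.
if __name__ == "__main__":
    model = nn.Linear(50, 50)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    print(unwrap_schedule(scheduler, num_steps=10))
    # expected: [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]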
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' ,['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' ,[
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' ,[
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
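

# Worked example (hypothetical checkpoint key, shown for illustration): with an
# accumulated patch-embedding offset of 1, a key from original block 2 is
# remapped into the per-stage block numbering used by the HF implementation:
#
#   replace_key_with_offset("network.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "network.block.1.0.output.conv1.weight"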
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
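
# Example invocation (the script filename and the paths below are placeholders):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf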
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
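

# Minimal usage sketch outside the test harness (assumes Pillow and torch are
# installed; the size dict mirrors the defaults used by the tester above, and
# the printed shape is the expected result under those settings):
if __name__ == "__main__":
    image_processor = DonutImageProcessor(size={"height": 18, "width": 20})
    dummy_image = Image.new("RGB", (32, 24))
    pixel_values = image_processor(dummy_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])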
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
def count_divisors(n):
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution():
    """Returns the value of the first triangle number to have over five hundred divisors."""
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
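
# Worked example of the divisor-counting trick: 28 = 2^2 * 7, so
# count_divisors(28) = (2 + 1) * (1 + 1) = 6, matching its divisors
# 1, 2, 4, 7, 14, 28. Since 28 is also the 7th triangle number, it is the
# first triangle number with more than five divisors; `solution` applies the
# same idea with a threshold of 500.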
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
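

# Minimal usage sketch: instantiate the config with its defaults and inspect
# the dynamic ONNX input axes defined above.
if __name__ == "__main__":
    config = Data2VecTextConfig()
    print(config.hidden_size)  # 768
    onnx_config = Data2VecTextOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'input_ids': {...}, 'attention_mask': {...}}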
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any="cpu" , __SCREAMING_SNAKE_CASE : Dict=torch.floataa , __SCREAMING_SNAKE_CASE : str=0 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 8, 128, 16) )
__SCREAMING_SNAKE_CASE = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__SCREAMING_SNAKE_CASE = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_inputs(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 25
__SCREAMING_SNAKE_CASE = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 81_920
__SCREAMING_SNAKE_CASE = audio[77_230:77_240]
__SCREAMING_SNAKE_CASE = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
__SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__SCREAMING_SNAKE_CASE = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_inputs(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 81_920
__SCREAMING_SNAKE_CASE = audio[27_780:27_790]
__SCREAMING_SNAKE_CASE = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
__SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
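# A minimal, self-contained sketch of the sample-count arithmetic the assertions above
# rely on. The 16 kHz rate is an assumption consistent with the asserted lengths
# (0.016 s -> 256 samples in the dummy tests, 5.12 s -> 81_920 samples here).
def expected_num_samples(audio_length_in_s: float, sampling_rate: int = 16_000) -> int:
    # the number of waveform samples is the duration times the vocoder sampling rate
    return round(audio_length_in_s * sampling_rate)

assert expected_num_samples(0.016) == 256
assert expected_num_samples(5.12) == 81_920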
| 267 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class __a ( unittest.TestCase , UpperCAmelCase ):
_a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_a : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
        self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
                self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
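# A standalone sketch of the tuple-vs-dict equivalence walk performed above: traverse
# both output structures in parallel and compare leaf tensors with NaNs zeroed out.
# The helper name is illustrative only, not part of the transformers test API.
import torch

def outputs_allclose(tuple_out, dict_out, atol: float = 1e-5) -> bool:
    if tuple_out is None and dict_out is None:
        return True
    if isinstance(tuple_out, (list, tuple)):
        return all(outputs_allclose(t, d, atol) for t, d in zip(tuple_out, dict_out))
    # zero out NaNs so they compare equal on both sides, mirroring set_nan_tensor_to_zero
    return torch.allclose(torch.nan_to_num(tuple_out, nan=0.0), torch.nan_to_num(dict_out, nan=0.0), atol=atol)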
| 329 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A_ :
def __init__(self :Optional[int] , _UpperCamelCase :Optional[int] , _UpperCamelCase :int=13 , _UpperCamelCase :int=7 , _UpperCamelCase :List[str]=True , _UpperCamelCase :Optional[Any]=True , _UpperCamelCase :Union[str, Any]=True , _UpperCamelCase :int=True , _UpperCamelCase :List[Any]=99 , _UpperCamelCase :List[str]=32 , _UpperCamelCase :List[Any]=2 , _UpperCamelCase :Union[str, Any]=4 , _UpperCamelCase :int=37 , _UpperCamelCase :Optional[int]="gelu" , _UpperCamelCase :Union[str, Any]=0.1 , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :Optional[Any]=512 , _UpperCamelCase :Tuple=16 , _UpperCamelCase :Optional[int]=2 , _UpperCamelCase :Any=0.0_2 , _UpperCamelCase :List[str]=3 , _UpperCamelCase :Any=4 , _UpperCamelCase :List[Any]=None , )-> List[Any]:
__A = parent
__A = 13
__A = 7
__A = True
__A = True
__A = True
__A = True
__A = 99
__A = 384
__A = 2
__A = 4
__A = 37
__A = '''gelu'''
__A = 0.1
__A = 0.1
__A = 512
__A = 16
__A = 2
__A = 0.0_2
__A = 3
__A = 4
__A = 128
__A = 2
__A = 9
__A = 1
__A = None
def _lowerCAmelCase (self :Any )-> int:
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Any , _UpperCamelCase :str , _UpperCamelCase :Optional[Any] , _UpperCamelCase :List[str] , _UpperCamelCase :Tuple , _UpperCamelCase :Optional[Any] , _UpperCamelCase :str )-> int:
__A = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__A = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__A = [input_ids, input_mask]
__A = model(_SCREAMING_SNAKE_CASE )
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :List[Any] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[Any] , _UpperCamelCase :List[Any] )-> List[Any]:
__A = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__A = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :List[str] , _UpperCamelCase :Optional[int] , _UpperCamelCase :Any , _UpperCamelCase :str , _UpperCamelCase :str )-> Optional[int]:
__A = self.num_labels
__A = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__A = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :List[str] , _UpperCamelCase :List[str] , _UpperCamelCase :int , _UpperCamelCase :Tuple , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[Any] )-> Optional[Any]:
__A = self.num_choices
__A = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__A = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__A = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__A = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__A = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase (self :Dict , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :str , _UpperCamelCase :List[str] , _UpperCamelCase :Tuple , _UpperCamelCase :Optional[Any] , _UpperCamelCase :List[str] )-> Any:
__A = self.num_labels
__A = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__A = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase (self :Dict , _UpperCamelCase :List[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :List[str] , _UpperCamelCase :List[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :str , _UpperCamelCase :Optional[int] )-> Optional[int]:
__A = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__A = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__A = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase (self :List[str] )-> str:
__A = self.prepare_config_and_inputs()
        __A , __A , __A , __A , __A , __A , __A = config_and_inputs
__A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowerCAmelCase (self :Union[str, Any] )-> Optional[Any]:
__A = TFConvBertModelTester(self )
__A = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase (self :Any )-> List[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase (self :List[Any] )-> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :Union[str, Any] )-> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :Any )-> Union[str, Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :List[Any] )-> int:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :Optional[Any] )-> Dict:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :int )-> Tuple:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase (self :str )-> Optional[Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
__A = True
if hasattr(_SCREAMING_SNAKE_CASE , '''use_cache''' ):
__A = True
__A = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__A = getattr(self.model_tester , '''key_length''' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__A = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A = model_class(_SCREAMING_SNAKE_CASE )
__A = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__A = os.path.join(_SCREAMING_SNAKE_CASE , '''saved_model''' , '''1''' )
__A = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__A = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__A = outputs['''encoder_hidden_states''']
__A = outputs['''encoder_attentions''']
else:
__A = outputs['''hidden_states''']
__A = outputs['''attentions''']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__A = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
            list(output_hidden_states[0].shape[-2:] ) if False else list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowerCAmelCase (self :Dict )-> Any:
__A = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase (self :Union[str, Any] )-> Optional[int]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
__A = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
__A = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__A = getattr(self.model_tester , '''key_length''' , _SCREAMING_SNAKE_CASE )
__A = getattr(self.model_tester , '''key_length''' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_UpperCamelCase :List[Any] ):
__A = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__A = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCamelCase :str ):
__A = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A = True
__A = False
__A = model_class(_SCREAMING_SNAKE_CASE )
__A = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__A = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__A = model_class(_SCREAMING_SNAKE_CASE )
__A = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A = True
__A = model_class(_SCREAMING_SNAKE_CASE )
__A = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__A = True
__A = True
__A = model_class(_SCREAMING_SNAKE_CASE )
__A = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A_ ( unittest.TestCase ):
@slow
def _lowerCAmelCase (self :Tuple )-> List[str]:
__A = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
__A = tf.constant([[0, 1, 2, 3, 4, 5]] )
__A = model(_SCREAMING_SNAKE_CASE )[0]
__A = [1, 6, 768]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__A = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
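# The halved head count checked above reflects ConvBERT's mixed-attention design:
# with the default head_ratio of 2, half of the attention heads are replaced by
# span-based dynamic convolution, so the returned attention maps carry
# num_attention_heads // head_ratio heads. A sketch of the expected shape:
def expected_attention_shape(batch_size: int, num_attention_heads: int, head_ratio: int, query_len: int, key_len: int) -> tuple:
    return (batch_size, num_attention_heads // head_ratio, query_len, key_len)

# with the tester values above (batch 13, 4 heads, head_ratio 2, sequence length 7)
assert expected_attention_shape(13, 4, 2, 7, 7) == (13, 2, 7, 7)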
| 117 |
from collections.abc import Generator
def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while True:
_UpperCAmelCase , _UpperCAmelCase = b, a + b
yield b
def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = fibonacci_generator()
while len(str(next(a__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
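# Worked example for the search above (Project Euler 25): counting 1-indexed terms
# 1, 1, 2, 3, 5, 8, ..., the first Fibonacci number with three digits is F(12) = 144,
# so the answer for n = 3 is 12. A self-contained check of that claim:
def first_fib_index_with_n_digits(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_n_digits(3) == 12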
| 329 | 0 |
'''simple docstring'''
from collections.abc import Callable
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> float:
"""simple docstring"""
__A = a
__A = b
    if function(a__ ) == 0: # the endpoint a is itself a root of the function
return a
elif function(a__ ) == 0:
return b
    elif (
        function(a__ ) * function(a__ ) > 0
    ): # if neither endpoint is a root and f(a) and f(b) share the same sign,
        # there is no guaranteed sign change, so bisection cannot bracket a root
raise ValueError('could not find root in given interval.' )
else:
__A = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # halve the bracket until mid is within 10^-7 of an endpoint
if function(a__ ) == 0:
return mid
elif function(a__ ) * function(a__ ) < 0:
__A = mid
else:
__A = mid
__A = start + (end - start) / 2.0
return mid
def snake_case ( UpperCAmelCase )-> float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
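# A self-contained usage sketch of the bisection idea above: f(x) = x**3 - 2*x - 5
# changes sign on [1, 3], and repeatedly halving the bracket until it is narrower
# than 1e-7 pins down the single real root near x ~ 2.0945515.
def f_example(x: float) -> float:
    return x**3 - 2 * x - 5

lo, hi = 1.0, 3.0
while hi - lo > 1e-7:
    mid = (lo + hi) / 2.0
    if f_example(lo) * f_example(mid) <= 0:
        hi = mid  # the sign change is in the lower half
    else:
        lo = mid  # the sign change is in the upper half
assert abs(f_example((lo + hi) / 2.0)) < 1e-5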
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
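# A sketch of the crop_pct behaviour the PoolFormer tests above exercise, assuming
# the usual timm-style evaluation transform: resize so the shortest edge becomes
# crop_size / crop_pct, then center-crop back down to crop_size.
def eval_resize_edge(crop_size: int, crop_pct: float) -> int:
    # e.g. crop_size=30, crop_pct=0.9 -> resize shortest edge to 33, then crop to 30
    return int(crop_size / crop_pct)

assert eval_resize_edge(30, 0.9) == 33
assert eval_resize_edge(224, 0.9) == 248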
| 329 | 0 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : int , __lowercase : Any , __lowercase : Any , __lowercase : str = 0 ):
"""simple docstring"""
snake_case_ , snake_case_ = row, column
snake_case_ = [[default_value for c in range(_SCREAMING_SNAKE_CASE )] for r in range(_SCREAMING_SNAKE_CASE )]
def __str__( self : Optional[Any] ):
"""simple docstring"""
        snake_case_ = f"Matrix consists of {self.row} rows and {self.column} columns\n"
# Make string identifier
snake_case_ = 0
for row_vector in self.array:
for obj in row_vector:
snake_case_ = max(_SCREAMING_SNAKE_CASE , len(str(_SCREAMING_SNAKE_CASE ) ) )
snake_case_ = f"%{max_element_length}s"
# Make string and return
def single_line(__lowercase : Dict ) -> str:
nonlocal string_format_identifier
snake_case_ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_SCREAMING_SNAKE_CASE ) for row_vector in self.array )
return s
def __repr__( self : str ):
"""simple docstring"""
return str(self )
def snake_case__ ( self : Optional[Any] , __lowercase : str ):
"""simple docstring"""
if not (isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and len(_SCREAMING_SNAKE_CASE ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , __lowercase : List[Any] ):
"""simple docstring"""
assert self.validate_indicies(_SCREAMING_SNAKE_CASE )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __lowercase : Any , __lowercase : str ):
"""simple docstring"""
assert self.validate_indicies(_SCREAMING_SNAKE_CASE )
snake_case_ = value
def __add__( self : str , __lowercase : Tuple ):
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert self.row == another.row and self.column == another.column
# Add
snake_case_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ = self[r, c] + another[r, c]
return result
def __neg__( self : str ):
"""simple docstring"""
snake_case_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ = -self[r, c]
return result
def __sub__( self : List[Any] , __lowercase : Optional[Any] ):
"""simple docstring"""
return self + (-another)
def __mul__( self : Any , __lowercase : List[str] ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): # Scalar multiplication
snake_case_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ = self[r, c] * another
return result
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Matrix multiplication
assert self.column == another.row
snake_case_ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
snake_case_ = f"Unsupported type given for another ({type(_SCREAMING_SNAKE_CASE )})"
raise TypeError(_SCREAMING_SNAKE_CASE )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ = self[r, c]
return result
def snake_case__ ( self : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        assert self.row == self.column == u.row == v.row # the matrix must be square and match the length of u and v
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
snake_case_ = v.transpose()
snake_case_ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # 1 + v^T A^(-1) u == 0, so (A + uv^T) is not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = Matrix(3 , 3 , 0 )
for i in range(3 ):
snake_case_ = 1
print(f"a^(-1) is {ainv}" )
# u, v
snake_case_ = Matrix(3 , 1 , 0 )
snake_case_ , snake_case_ , snake_case_ = 1, 2, -3
snake_case_ = Matrix(3 , 1 , 0 )
snake_case_ , snake_case_ , snake_case_ = 4, -2, 5
print(f"u is {u}" )
print(f"v is {v}" )
print(f"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(a__ , a__ )}" )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
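# A numpy cross-check of the Sherman-Morrison identity applied by the class above:
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
import numpy as np

A = np.array([[2.0, 0.0], [0.0, 3.0]])
u = np.array([[1.0], [2.0]])
v = np.array([[4.0], [-1.0]])
ainv = np.linalg.inv(A)
denominator = 1.0 + (v.T @ ainv @ u).item()  # nonzero here, so the updated matrix is invertible
updated_inv = ainv - (ainv @ u @ v.T @ ainv) / denominator
assert np.allclose(updated_inv, np.linalg.inv(A + u @ v.T))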
| 187 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
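# A sketch of the thumbnail-then-pad steps the Donut tests above toggle, assuming
# PIL-style thumbnail semantics: shrink to fit inside the target while preserving
# aspect ratio (never upscale), then pad the remainder out to the exact canvas.
def thumbnail_then_pad(height: int, width: int, target_h: int, target_w: int):
    scale = min(target_h / height, target_w / width, 1.0)  # never upscale
    new_h, new_w = int(height * scale), int(width * scale)
    return (new_h, new_w), (target_h - new_h, target_w - new_w)  # resized size, total padding

# a 36x20 image shrinks to 18x10 to fit an 18x20 canvas, leaving 10 columns of padding
assert thumbnail_then_pad(36, 20, 18, 20) == ((18, 10), (0, 10))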
| 329 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( enum.Enum ):
'''simple docstring'''
__snake_case = 0
__snake_case = 1
@add_end_docstrings(lowercase )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = 'generated'
def __init__( self : Dict , *__UpperCAmelCase : Any , **__UpperCAmelCase : str ) ->Optional[int]:
"""simple docstring"""
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , **__UpperCAmelCase : Optional[int] , ) ->Optional[Any]:
"""simple docstring"""
a = {}
if truncation is not None:
a = truncation
a = generate_kwargs
a = {}
if return_tensors is not None and return_type is None:
a = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ) ->str:
"""simple docstring"""
return True
def __lowerCAmelCase ( self : str , *__UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _SCREAMING_SNAKE_CASE ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
a = ([prefix + arg for arg in args[0]],)
a = True
elif isinstance(args[0] , _SCREAMING_SNAKE_CASE ):
a = (prefix + args[0],)
a = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
a = self.tokenizer(*_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : str , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : str ) ->str:
"""simple docstring"""
a = super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if (
isinstance(args[0] , _SCREAMING_SNAKE_CASE )
and all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for el in args[0] )
and all(len(_SCREAMING_SNAKE_CASE ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCAmelCase : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self._parse_and_tokenize(_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return inputs
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[Any] ) ->List[Any]:
"""simple docstring"""
if self.framework == "pt":
a , a = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
a , a = tf.shape(model_inputs['''input_ids'''] ).numpy()
a = generate_kwargs.get('''min_length''' , self.model.config.min_length )
a = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_SCREAMING_SNAKE_CASE , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
a = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a = output_ids.shape[0]
if self.framework == "pt":
a = output_ids.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any=ReturnType.TEXT , __UpperCAmelCase : Dict=False ) ->Optional[int]:
"""simple docstring"""
a = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
a = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
a = {
F"""{self.return_name}_text""": self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
@add_end_docstrings(lowercase )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = 'summary'
def __call__( self : List[Any] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[Any] ) ->List[Any]:
"""simple docstring"""
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ) ->bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})""" )
@add_end_docstrings(lowercase )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = 'translation'
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] ) ->Tuple:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __lowerCAmelCase ( self : List[Any] , *__UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCAmelCase : int=None , __UpperCAmelCase : Any=None ) ->Tuple:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _SCREAMING_SNAKE_CASE ):
return self.tokenizer._build_translation_inputs(
*_SCREAMING_SNAKE_CASE , return_tensors=self.framework , truncation=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE )
else:
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Optional[int] ) ->Any:
"""simple docstring"""
a , a , a = super()._sanitize_parameters(**_SCREAMING_SNAKE_CASE )
if src_lang is not None:
a = src_lang
if tgt_lang is not None:
a = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
a = kwargs.get('''task''' , self.task )
a = task.split('''_''' )
if task and len(_SCREAMING_SNAKE_CASE ) == 4:
# translation, XX, to YY
a = items[1]
a = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : str , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any] ) ->int:
"""simple docstring"""
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
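# Usage sketch for the task-string parsing in _sanitize_parameters above: a
# task such as "translation_en_to_fr" splits on "_" into four items, with
# items[1] as the source language and items[3] as the target language. A
# plain-Python re-check of that convention (no pipeline instantiation needed):
_items = "translation_en_to_fr".split("_")
assert len(_items) == 4 and (_items[1], _items[3]) == ("en", "fr")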
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
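# The attribute_map above lets callers read config.hidden_size even though the
# stored field is n_embd. A minimal self-contained sketch of that aliasing; the
# toy class below is an assumption about the mechanism, not the actual
# PretrainedConfig implementation:
class _ToyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}
    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer
    def __getattr__(self, name):
        # only reached when normal lookup fails; redirect mapped aliases
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
assert _ToyConfig().hidden_size == 768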
| 329 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=16 , lowerCamelCase_=[1, 2, 1] , lowerCamelCase_=[2, 2, 4] , lowerCamelCase_=2 , lowerCamelCase_=2.0 , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_="gelu" , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=0.02 , lowerCamelCase_=1E-5 , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=10 , lowerCamelCase_=8 , lowerCamelCase_=["stage1", "stage2", "stage3"] , lowerCamelCase_=[1, 2, 3] , ):
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = num_heads
a = window_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = use_absolute_embeddings
a = patch_norm
a = layer_norm_eps
a = initializer_range
a = is_training
a = scope
a = use_labels
a = type_sequence_label_size
a = encoder_stride
a = out_features
a = out_indices
def UpperCamelCase_ (self ):
"""simple docstring"""
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ (self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a = model(_SCREAMING_SNAKE_CASE )
a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
a = ["stem"]
a = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__A = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__A = False
__A = False
__A = False
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = MaskFormerSwinModelTester(self )
a = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with"
" `nn.DataParallel`"
) )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ (self ):
"""simple docstring"""
return
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip("Swin does not use inputs_embeds" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(_SCREAMING_SNAKE_CASE )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn\'t support output_attentions" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a = outputs.hidden_states
a = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
a = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
a = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn\'t have pretrained checkpoints" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCamelCase_ ):
a = 0
return t
def check_equivalence(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_={} ):
with torch.no_grad():
a = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(lowerCamelCase_ , lowerCamelCase_ ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
F''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
a = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
a = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )
@require_torch
class _lowercase ( unittest.TestCase, lowerCAmelCase ):
"""simple docstring"""
__A = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__A = MaskFormerSwinConfig
def UpperCamelCase_ (self ):
"""simple docstring"""
a = MaskFormerSwinModelTester(self )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
a = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
a = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
a = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
a , a , a = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
a = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 227 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
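# Concrete illustration of the URL shape asserted above, written out by hand
# for one parametrization (repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision=None, which resolves to "main"):
_expected = (
    "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/"
    + quote("filename with blanks.csv")
)
assert _expected.endswith("filename%20with%20blanks.csv")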
| 329 | 0 |
'''simple docstring'''
lowercase__ : Optional[int] = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
lowercase__ : Dict = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def a__ ( lowercase : float, lowercase : str, lowercase : str ) -> float:
"""simple docstring"""
_UpperCamelCase = from_type.lower().strip('''s''' )
_UpperCamelCase = to_type.lower().strip('''s''' )
_UpperCamelCase = UNIT_SYMBOL.get(a__, a__ )
_UpperCamelCase = UNIT_SYMBOL.get(a__, a__ )
if from_sanitized not in METRIC_CONVERSION:
_UpperCamelCase = (
F"""Invalid \'from_type\' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {', '.join(a__ )}"""
)
raise ValueError(a__ )
if to_sanitized not in METRIC_CONVERSION:
_UpperCamelCase = (
F"""Invalid \'to_type\' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {', '.join(a__ )}"""
)
raise ValueError(a__ )
_UpperCamelCase = METRIC_CONVERSION[from_sanitized]
_UpperCamelCase = METRIC_CONVERSION[to_sanitized]
_UpperCamelCase = 1
if from_exponent > to_exponent:
_UpperCamelCase = from_exponent - to_exponent
else:
_UpperCamelCase = -(to_exponent - from_exponent)
return value * pow(10, a__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
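# Worked example tracing the arithmetic above by hand, kept self-contained so
# it does not depend on the obfuscated signature: converting 4 km to m uses
# from_exponent=3 and to_exponent=0, so the factor is 10 ** (3 - 0) and the
# result is 4 * 1000 = 4000.
_from_exp, _to_exp, _value = 3, 0, 4
assert _value * pow(10, _from_exp - _to_exp) == 4000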
| 324 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
        # Maybe switch to is_torch_available in the future here so that Accelerate becomes a hard
        # dependency of Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
'''simple docstring'''
require_version(deps[pkg] , a__ )
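# The requirement strings consumed by require_version above follow the pip
# specifier format, e.g. "tokenizers>=0.11.1,!=0.11.3,<0.13". A minimal
# self-contained comparison in the same spirit (packaging is already listed
# as a runtime dependency above):
from packaging import version as _version
assert _version.parse("0.12.0") >= _version.parse("0.11.1")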
| 329 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
__A : Optional[int] = int(a__ )
assert noofclusters < len(a__ )
# Find out the dimensionality
__A : Tuple = len(vectors[0] )
# Will help select random centroids from among the available vectors
__A : List[str] = list(range(len(a__ ) ) )
shuffle(a__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__A : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__A : Tuple = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__A : Optional[Any] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(a__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__A : List[str] = tf.placeholder('float64' , [dim] )
__A : Any = []
for centroid in centroids:
cent_assigns.append(tf.assign(a__ , a__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__A : Optional[Any] = [tf.Variable(0 ) for i in range(len(a__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__A : Dict = tf.placeholder('int32' )
__A : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(a__ , a__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__A : Dict = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__A : List[str] = tf.reduce_mean(a__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__A : str = tf.placeholder('float' , [dim] )
__A : str = tf.placeholder('float' , [dim] )
        __A : str = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(a__ , a__ ) , 2 ) ) )  # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__A : List[Any] = tf.placeholder('float' , [noofclusters] )
__A : Any = tf.argmin(a__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        __A : Tuple = tf.global_variables_initializer()  # initialize_all_variables was deprecated in favor of this
# Initialize all variables
sess.run(a__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__A : Tuple = 1_00
for _ in range(a__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(a__ ) ):
__A : str = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__A : Union[str, Any] = [
sess.run(a__ , feed_dict={va: vect, va: sess.run(a__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__A : List[Any] = sess.run(
a__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(a__ ):
# Collect all the vectors assigned to this cluster
__A : Union[str, Any] = [
vectors[i]
for i in range(len(a__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__A : str = sess.run(
a__ , feed_dict={mean_input: array(a__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__A : Optional[Any] = sess.run(a__ )
__A : Dict = sess.run(a__ )
return centroids, assignments
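# The Expectation step above boils down to "assign each vector to its nearest
# centroid". A minimal NumPy rendition of that one step, as an illustration of
# the logic rather than of the TF1 graph itself:
import numpy as np
_pts = np.array([[1.0, 1.0], [1.1, 0.9], [9.0, 9.0]])
_cents = np.array([[1.0, 1.0], [9.0, 9.0]])
_d = np.linalg.norm(_pts[:, None, :] - _cents[None, :, :], axis=-1)
assert np.argmin(_d, axis=1).tolist() == [0, 0, 1]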
| 280 |
from __future__ import annotations
def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
while stack:
_UpperCAmelCase = stack.pop()
explored.add(a__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(a__ )
return explored
lowerCAmelCase__ :Tuple = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
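# Self-contained check of the push-reversed / pop-last discipline used above:
# because neighbors are pushed in reversed order, they are popped in the
# original adjacency order, so the first branch explored from "A" is its
# first-listed neighbor "B".
_g = {"A": ["B", "C"], "B": [], "C": []}
_stack, _order = ["A"], []
while _stack:
    _v = _stack.pop()
    if _v in _order:
        continue
    _order.append(_v)
    for _adj in reversed(_g[_v]):
        if _adj not in _order:
            _stack.append(_adj)
assert _order == ["A", "B", "C"]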
| 329 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : int , *_a : str , **_a : Dict ) -> Dict:
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _lowercase ( self : Any , _a : str=None , _a : Union[str, Any]=None , _a : Optional[Any]=None ) -> int:
__lowerCamelCase : Union[str, Any] = {}
__lowerCamelCase : Dict = {}
if prompt is not None:
__lowerCamelCase : Tuple = prompt
if generate_kwargs is not None:
__lowerCamelCase : Optional[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__lowerCamelCase : Any = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__lowerCamelCase : List[str] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Union[str, Any] , _a : str , **_a : List[Any] ) -> Optional[Any]:
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowercase ( self : int , _a : str , _a : Tuple=None ) -> int:
__lowerCamelCase : Union[str, Any] = load_image(_SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__lowerCamelCase : Optional[Any] = self.model.config.model_type
if model_type == "git":
__lowerCamelCase : str = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCamelCase : List[Any] = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
__lowerCamelCase : str = [self.tokenizer.cls_token_id] + input_ids
__lowerCamelCase : Optional[Any] = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__lowerCamelCase : int = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__lowerCamelCase : List[str] = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCamelCase : Tuple = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
__lowerCamelCase : Optional[Any] = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__lowerCamelCase : str = None
return model_inputs
def _lowercase ( self : Optional[int] , _a : Optional[int] , _a : str=None ) -> List[str]:
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs['input_ids'] )
):
__lowerCamelCase : Optional[Any] = None
if generate_kwargs is None:
__lowerCamelCase : List[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__lowerCamelCase : Any = model_inputs.pop(self.model.main_input_name )
__lowerCamelCase : Optional[Any] = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_outputs
def _lowercase ( self : Dict , _a : int ) -> Dict:
__lowerCamelCase : Optional[Any] = []
for output_ids in model_outputs:
__lowerCamelCase : str = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
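# Hedged usage sketch for the image-to-text pipeline above; the checkpoint
# name is illustrative and the call requires downloading weights, so this is
# shown as comments rather than executable code:
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("cats.png")
# -> [{"generated_text": "two cats sleeping on a couch"}]  (output illustrative)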
| 208 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 329 | 0 |
from typing import List
import numpy as np
def __lowerCAmelCase ( a__ ) -> int:
__a = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
                + '''we found several lists of data sources with different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
__a = max(lists_lengths.values() , default=0 )
return max(1 , a__ )
def __lowerCAmelCase ( a__ , a__ ) -> List[range]:
__a = []
for group_idx in range(a__ ):
__a = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__a = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__a = range(a__ , start + num_shards_to_add )
shards_indices_per_group.append(a__ )
return shards_indices_per_group
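# Worked example of the distribution above: 5 shards over 2 jobs gives group
# sizes 3 and 2 (num_shards // max_num_jobs, plus one extra shard for each of
# the first num_shards % max_num_jobs groups), i.e. ranges [0, 3) and [3, 5).
# Self-contained re-check of that arithmetic:
_num_shards, _max_num_jobs = 5, 2
_sizes = [_num_shards // _max_num_jobs + (g < _num_shards % _max_num_jobs) for g in range(_max_num_jobs)]
assert _sizes == [3, 2] and sum(_sizes) == _num_shards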
def __lowerCAmelCase ( a__ , a__ ) -> List[dict]:
__a = _number_of_shards_in_gen_kwargs(a__ )
if num_shards == 1:
return [dict(a__ )]
else:
__a = _distribute_shards(num_shards=a__ , max_num_jobs=a__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(a__ , a__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(a__ ) )
]
def __lowerCAmelCase ( a__ ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , a__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __lowerCAmelCase ( a__ , a__ ) -> dict:
__a = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )}
__a = {}
for size in list_sizes:
__a = list(range(a__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__a = dict(a__ )
for key, value in shuffled_kwargs.items():
if isinstance(a__ , a__ ):
__a = [value[i] for i in indices_per_size[len(a__ )]]
    return shuffled_kwargs
| 6 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ :int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '''
'Note also that one single text can be provided for conditional image to text generation.' )
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs['input_ids'] )
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
_UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self, vocab_size=4_0478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
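# Added usage sketch (illustrative): the config above mirrors the upstream
# OpenAIGPTConfig, so `attribute_map` lets the canonical names resolve to the
# GPT-1 specific ones.
if __name__ == "__main__":
    config = OpenAIGPTConfig()
    assert config.hidden_size == config.n_embd == 768
    assert config.max_position_embeddings == config.n_positions == 512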
| 36 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from=None , standard_warn=True , stacklevel=2 ):
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
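# Added usage sketch (illustrative; assumes this module lives inside the diffusers
# package so the relative `__version__` import above resolves). `deprecate` pops a
# deprecated kwarg, emits a FutureWarning, and hands back the popped value. The
# function and argument names below are hypothetical.
def set_timesteps(num_inference_steps=None , **kwargs ):
    steps = deprecate('steps' , '99.0.0' , 'Pass `num_inference_steps` instead.' , take_from=kwargs )
    return steps if steps is not None else num_inference_steps
# set_timesteps(steps=25) -> warns once, then returns 25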
| 329 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ) -> str:
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ) -> Union[str, Any]:
        """simple docstring"""
        return None
class OnnxExportTestCase(unittest.TestCase ):
    """simple docstring"""
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """tf""" , 12 , **model_kwargs )
@require_torch
@slow
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """pt""" , 12 , **model_kwargs )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , """pt""" , 12 , tokenizer )
@require_tf
@slow
def UpperCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """tf""" , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """pt""" , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ) -> int:
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
from transformers import BertModel
__SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pt""" )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
from transformers import TFBertModel
__SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """tf""" )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ) -> List[Any]:
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
        input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
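# Added usage sketch (illustrative): the same `convert` entry point the tests
# above exercise, driven directly. The output path is a placeholder.
def export_bert_to_onnx():
    convert(framework='pt' , model='bert-base-cased' , output=Path('/tmp/onnx/bert-base-cased.onnx' ) , opset=12 )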
| 267 |
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( num_picked: int = 2_0 ) -> str:
    '''simple docstring'''
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
    print(solution(2_0))
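# Added derivation note (illustrative): by linearity of expectation, the expected
# number of distinct colours among n drawn balls is
#     NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n)),
# because C(60, n) / C(70, n) is the chance one fixed colour is missed entirely.
# Tiny cross-check with a 2-colour, 2-balls-per-colour urn and a draw of 2:
# P(colour present) = 1 - C(2, 2)/C(4, 2) = 5/6, so the expectation is 2 * 5/6 = 5/3.
assert abs(2 * (1 - math.comb(2 , 2 ) / math.comb(4 , 2 )) - 5 / 3) < 1e-12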
| 329 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A_ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = VQModel
    main_input_name = 'sample'
@property
    def dummy_input(self , sizes=(32, 32) )-> Any:
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape(self )-> List[Any]:
return (3, 32, 32)
@property
    def output_shape(self )-> str:
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self )-> Tuple:
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _lowerCAmelCase (self :List[str] )-> Union[str, Any]:
pass
def _lowerCAmelCase (self :Optional[Any] )-> Tuple:
pass
def _lowerCAmelCase (self :int )-> str:
        model , loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowerCAmelCase (self :int )-> str:
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3 ) )
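# Added usage sketch (illustrative): round-trip a random sample through the same
# tiny VQ checkpoint the tests above load.
def example_vq_roundtrip():
    model = VQModel.from_pretrained('fusing/vqgan-dummy' ).eval()
    sample = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
    with torch.no_grad():
        return model(sample ).sample.shape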
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
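# Added sketch (illustrative): the essence of `_LazyModule` above is deferred
# attribute access. A minimal stand-in built on types.ModuleType / __getattr__:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that owns it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, name):
        # import the owning submodule only on first access
        module = importlib.import_module('.' + self._class_to_module[name], self.__name__)
        return getattr(module, name)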
| 329 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , )-> Union[str, Any]:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__( self , num_train_timesteps: int = 1_000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ) -> List[Any]:
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> Optional[Any]:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ) -> Tuple:
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prev_timestep: Optional[int] = None , generator=None , return_dict: bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                ' for the UnCLIPScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    ' for the UnCLIPScheduler.' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.IntTensor , ) -> torch.FloatTensor:
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
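# Added usage sketch (illustrative): a bare denoising loop against the scheduler
# above, as in upstream diffusers. The zero tensor stands in for a real UNet call.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25 )
    sample = torch.randn(1 , 3 , 64 , 64 )
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample )  # placeholder for model(sample, t)
        sample = scheduler.step(model_output , t , sample ).prev_sample
    print(sample.shape )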
| 161 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = txa_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale']
_UpperCAmelCase = txa_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
_UpperCAmelCase = tax_model['target']['token_embedder']['embedding']
_UpperCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
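# Added invocation example (illustrative; paths are placeholders):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/t5_small/checkpoint_1000000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path /tmp/t5_flax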
| 329 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester ):
'''simple docstring'''
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "width_multiplier" ) )
class MobileViTVaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(5_12 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTVaModel,
            'image-classification': MobileViTVaForImageClassification,
            'image-segmentation': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
def snake_case__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case__ ( self : Dict ):
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def snake_case__ ( self : str ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def snake_case__ ( self : Tuple ):
"""simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_SCREAMING_SNAKE_CASE )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
snake_case_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = model.to(_SCREAMING_SNAKE_CASE )
snake_case_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
snake_case_ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = model.to(_SCREAMING_SNAKE_CASE )
snake_case_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] )
snake_case_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE )
snake_case_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
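# Added usage sketch (illustrative): the same segmentation checkpoint the slow
# tests above load, run end to end on the local COCO fixture image.
def example_segmentation_inference():
    model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ).eval()
    image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
    inputs = image_processor(images=prepare_img() , return_tensors='pt' )
    with torch.no_grad():
        logits = model(**inputs ).logits
    # coarse per-pixel class ids; post_process_semantic_segmentation would upsample
    return logits.argmax(dim=1 )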
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig ):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ) -> List[str]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
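# Added usage sketch (illustrative): the defaults reproduce the canonical CTRL
# sizes, and `attribute_map` exposes them under the canonical names.
if __name__ == "__main__":
    config = CTRLConfig()
    assert config.num_hidden_layers == config.n_layer == 48
    assert config.hidden_size == config.n_embd == 1280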
| 329 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args , **kwargs ) ->int:
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
a = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
a = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
a = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@require_tf
def __lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
a = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
a = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
a = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
a = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
a = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
a = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
a = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
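# Added usage sketch (illustrative): the zero-shot task the tests above cover,
# outside the test harness. Candidate labels are free-form strings scored by CLIP.
def example_zero_shot():
    classifier = pipeline(task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' )
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return classifier(image , candidate_labels=['cat' , 'plane' , 'remote'] )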
| 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> Dict:
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ) -> Any:
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
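    # Added worked example (illustrative): the w < h branch above in numbers.
    def example_expected_resize(self ):
        # shortest_edge=18 on a 400x300 (h x w) image: width pins to 18, height
        # scales to int(18 * 400 / 300) == 24.
        assert int(18 * 400 / 300 ) == 24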
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ) -> str:
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> str:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
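# A minimal standalone sketch of the shortest-edge resize rule behind the 800x1066
# sizes asserted above; it ignores the longest-edge cap (which does not trigger for
# this 480x640 image) and the helper name is illustrative, not a library API.
def _shortest_edge_resize(height, width, shortest_edge=800):
    # scale so the shorter side hits `shortest_edge`, truncating like the processor does
    if height <= width:
        return shortest_edge, int(shortest_edge * width / height)
    return int(shortest_edge * height / width), shortest_edge

assert _shortest_edge_resize(480, 640) == (800, 1066)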
| 329 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: int = logging.get_logger(__name__)
def a( A : Any ) -> List[str]:
"""simple docstring"""
a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a = 128
elif "12-12" in model_name:
a = 12
a = 12
elif "14-14" in model_name:
a = 14
a = 14
elif "16-16" in model_name:
a = 16
a = 16
else:
raise ValueError("Model not supported" )
a = "huggingface/label-files"
if "speech-commands" in model_name:
a = 35
a = "speech-commands-v2-id2label.json"
else:
a = 527
a = "audioset-id2label.json"
a = json.load(open(hf_hub_download(a__ , a__ , repo_type="dataset" ) , "r" ) )
a = {int(a__ ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
return config
def a( A : int ) -> Optional[Any]:
"""simple docstring"""
if "module.v" in name:
a = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
a = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
a = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
a = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
a = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a = name.replace("attn" , "attention.self" )
if "norm1" in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
a = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
a = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def a( A : str , A : Optional[Any] ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(a__ )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = config.hidden_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
else:
a = val
return orig_state_dict
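# A minimal standalone sketch of the fused-qkv split performed above: the original
# checkpoint stores one (3*dim, dim) qkv matrix, which is sliced into query/key/value.
# Toy dim=4; `torch` is already imported at the top of this script.
_demo_dim = 4
_demo_qkv = torch.arange(3 * _demo_dim * _demo_dim, dtype=torch.float32).reshape(3 * _demo_dim, _demo_dim)
_q, _k, _v = _demo_qkv[:_demo_dim, :], _demo_qkv[_demo_dim : 2 * _demo_dim, :], _demo_qkv[-_demo_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _demo_qkv)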
def a( A : List[Any] ) -> List[Any]:
"""simple docstring"""
a = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
@torch.no_grad()
def a( A : Optional[Any] , A : Optional[int] , A : int=False ) -> int:
"""simple docstring"""
a = get_audio_spectrogram_transformer_config(a__ )
a = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
a = model_name_to_url[model_name]
a = torch.hub.load_state_dict_from_url(a__ , map_location="cpu" )
# remove some keys
remove_keys(a__ )
# rename some keys
a = convert_state_dict(a__ , a__ )
# load 🤗 model
a = ASTForAudioClassification(a__ )
model.eval()
model.load_state_dict(a__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a = -4.2_677_393 if "speech-commands" not in model_name else -6.845_978
a = 4.5_689_974 if "speech-commands" not in model_name else 5.5_654_526
a = 1024 if "speech-commands" not in model_name else 128
a = ASTFeatureExtractor(mean=a__ , std=a__ , max_length=a__ )
if "speech-commands" in model_name:
a = load_dataset("speech_commands" , "v0.02" , split="validation" )
a = dataset[0]["audio"]["array"]
else:
a = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
a , a = torchaudio.load(a__ )
a = waveform.squeeze().numpy()
a = feature_extractor(a__ , sampling_rate=1_6000 , return_tensors="pt" )
# forward pass
a = model(**a__ )
a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
a = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , a__ , atol=1e-4 ):
raise ValueError("Logits don\'t match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(a__ ).mkdir(exist_ok=a__ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(a__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase: Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
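# Illustrative invocation (the script filename and output path are assumptions;
# the model name is the default defined above):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast_converted \
#       --push_to_hub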
| 227 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
_a : List[str] = JukeboxTokenizer
_a : List[Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
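# The three tensors compared above are the conditioning sequences for Jukebox's three
# prior levels: only the top level appears to carry the full lyrics tokens, which is
# why the other two reduce to the short artist/genre prefix.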
| 329 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = 'data2vec-text'
def __init__( self : Tuple , lowerCAmelCase__ : str=30522 , lowerCAmelCase__ : Optional[Any]=768 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : Union[str, Any]=12 , lowerCAmelCase__ : Dict=3072 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=512 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : Union[str, Any]=1 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : List[Any]="absolute" , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Union[str, Any]=None , **lowerCAmelCase__ : str , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def snake_case__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
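# A standalone restatement of the dynamic-axes mapping built above, only to make the
# multiple-choice branch concrete (not library code; the helper name is illustrative):
def _dynamic_axes(task):
    axis = {0: 'batch', 1: 'choice', 2: 'sequence'} if task == 'multiple-choice' else {0: 'batch', 1: 'sequence'}
    return OrderedDict([('input_ids', axis), ('attention_mask', axis)])

assert list(_dynamic_axes('default')) == ['input_ids', 'attention_mask']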
| 324 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
_UpperCAmelCase = parser.parse_args()
return args
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
def fn(a__: str ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase__ ( a__: List[str] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(tokenized_data['input_ids'] ) ):
_UpperCAmelCase = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_UpperCAmelCase = tf.train.Features(feature=a__ )
_UpperCAmelCase = tf.train.Example(features=a__ )
_UpperCAmelCase = example.SerializeToString()
records.append(a__ )
return records
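# A tiny round-trip check mirroring the serialization above (toy values; assumes the
# standard tf.train proto API, with `tf` imported at the top of this script):
def _tfrecord_roundtrip_demo():
    feature = {'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 2, 3]))}
    blob = tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
    return list(tf.train.Example.FromString(blob).features.feature['input_ids'].int64_list.value)

assert _tfrecord_roundtrip_demo() == [1, 2, 3]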
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_UpperCAmelCase = min(len(a__ ) , args.limit )
_UpperCAmelCase = dataset.select(range(a__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_UpperCAmelCase = tokenize_function(a__ )
_UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a__: Optional[int] ):
# Concatenate all texts.
_UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
_UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_UpperCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_UpperCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for shard in range(0 , len(a__ ) , args.shard_size ):
_UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size]
_UpperCAmelCase = len(dataset_snapshot['input_ids'] )
_UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_UpperCAmelCase = get_serialized_examples(a__ )
with tf.io.TFRecordWriter(a__ ) as out_file:
for i in range(len(a__ ) ):
_UpperCAmelCase = serialized_examples[i]
out_file.write(a__ )
print('Wrote file {} containing {} records'.format(a__ , a__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = parse_args()
main(args)
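# A minimal standalone illustration of the group_texts chunking above
# (toy max_length=2; the script itself chunks to 512 via datasets.map):
def _group_texts_demo(examples, max_length):
    concatenated = {k: sum(examples[k], []) for k in examples}
    total = (len(concatenated['input_ids']) // max_length) * max_length
    return {k: [t[i : i + max_length] for i in range(0, total, max_length)] for k, t in concatenated.items()}

assert _group_texts_demo({'input_ids': [[1, 2, 3], [4, 5, 6, 7]]}, 2)['input_ids'] == [[1, 2], [3, 4], [5, 6]]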
| 329 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__A : Optional[int] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
__A : Union[str, Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A , __A : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Optional[Any] = jax.random.PRNGKey(0 )
__A : List[str] = 4
__A : Optional[Any] = jax.device_count()
__A : List[Any] = num_samples * [prompt]
__A : Optional[int] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__A : List[str] = replicate(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : int = shard(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
__A : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase_ ( self ):
__A , __A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : List[str] = jax.random.PRNGKey(0 )
__A : Any = 50
__A : Any = jax.device_count()
__A : List[str] = num_samples * [prompt]
__A : List[Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__A : List[Any] = replicate(_SCREAMING_SNAKE_CASE )
__A : Optional[Any] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : Optional[int] = shard(_SCREAMING_SNAKE_CASE )
__A : Dict = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def UpperCAmelCase_ ( self ):
__A , __A : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
__A : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : str = jax.random.PRNGKey(0 )
__A : Union[str, Any] = 50
__A : Optional[Any] = jax.device_count()
__A : Any = num_samples * [prompt]
__A : List[Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__A : Union[str, Any] = replicate(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : Dict = shard(_SCREAMING_SNAKE_CASE )
__A : List[Any] = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def UpperCAmelCase_ ( self ):
__A , __A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
__A : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Any = jax.random.PRNGKey(0 )
__A : List[str] = 50
__A : Tuple = jax.device_count()
__A : Optional[Any] = num_samples * [prompt]
__A : Any = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__A : Any = replicate(_SCREAMING_SNAKE_CASE )
__A : str = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = shard(_SCREAMING_SNAKE_CASE )
__A : Tuple = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def UpperCAmelCase_ ( self ):
__A : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
__A , __A : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
__A : Dict = scheduler.create_state()
__A : int = scheduler_state
__A : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Union[str, Any] = jax.random.PRNGKey(0 )
__A : Any = 50
__A : Optional[int] = jax.device_count()
__A : Optional[int] = num_samples * [prompt]
__A : Union[str, Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__A : Any = replicate(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = shard(_SCREAMING_SNAKE_CASE )
__A : List[str] = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def UpperCAmelCase_ ( self ):
__A : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : List[str] = jax.device_count()
__A : Any = num_samples * [prompt]
__A : List[Any] = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
__A , __A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
__A : Any = replicate(_SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
__A : str = shard(_SCREAMING_SNAKE_CASE )
__A : int = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__A : Tuple = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__A , __A : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
__A : int = replicate(_SCREAMING_SNAKE_CASE )
__A : Any = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
__A : str = shard(_SCREAMING_SNAKE_CASE )
__A : int = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__A : List[str] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
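# A minimal sketch of the replicate/split/shard pattern the tests above repeat for
# data-parallel inference; assumes classic uint32 PRNG keys and runs on however many
# local devices are visible.
if is_flax_available():
    _n = jax.local_device_count()                        # devices on this host
    _rngs = jax.random.split(jax.random.PRNGKey(0), _n)  # one key per device
    _batch = shard(np.zeros((_n * 2, 3), np.float32))    # leading dim -> (_n, 2, 3)
    assert _batch.shape == (_n, 2, 3) and _rngs.shape == (_n, 2)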
| 280 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for _ in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = []
for step in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(a__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , a__ )
_UpperCAmelCase = torch.load(a__ )
scheduler.load_state_dict(a__ )
return lrs
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __a ( unittest.TestCase ):
_a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None
_a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_a : List[Any] = 10
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListAlmostEqual(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' )
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = fn
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
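# A hand computation of the linear warmup/decay values asserted above
# (lr=10.0, num_warmup_steps=2, num_training_steps=10); the lambda mirrors the
# schedule formula, while the helper name itself is illustrative:
def _linear_lambda(step, warmup=2, total=10):
    if step < warmup:
        return step / warmup
    return max(0.0, (total - step) / (total - warmup))

assert [round(10.0 * _linear_lambda(s), 2) for s in range(10)] == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]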
| 329 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ ='ctrl'
a_ =['past_key_values']
a_ ={
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[str] , _a : str=24_6534 , _a : List[Any]=256 , _a : int=1280 , _a : Optional[int]=8192 , _a : str=48 , _a : List[str]=16 , _a : List[Any]=0.1 , _a : str=0.1 , _a : Tuple=1e-6 , _a : Any=0.02 , _a : Tuple=True , **_a : Optional[Any] , ) -> List[str]:
__lowerCamelCase : int = vocab_size
__lowerCamelCase : int = n_positions
__lowerCamelCase : Optional[Any] = n_embd
__lowerCamelCase : Tuple = n_layer
__lowerCamelCase : Tuple = n_head
__lowerCamelCase : Any = dff
__lowerCamelCase : Tuple = resid_pdrop
__lowerCamelCase : Union[str, Any] = embd_pdrop
__lowerCamelCase : Union[str, Any] = layer_norm_epsilon
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Dict = use_cache
super().__init__(**_SCREAMING_SNAKE_CASE )
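# The attribute_map above aliases the HF-standard names onto CTRL's historical ones,
# so reading or writing config.hidden_size goes through n_embd, num_hidden_layers
# through n_layer, and so on.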
| 208 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = original_name.split('.' )[0]
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] )
_UpperCAmelCase = orig_block_num - offset
_UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
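# A standalone restatement of the renumbering above with one worked key
# (the key string is illustrative, not taken from a real checkpoint):
def _renumber_demo(key, offset, original_name, new_name):
    parts = key.split('.')
    i = parts.index(original_name.split('.')[0])
    block, layer = int(parts[i - 2]), int(parts[i - 1])
    return key.replace(f'{block}.{layer}.{original_name}', f'block.{block - offset}.{layer}.{new_name}')

assert _renumber_demo('network.2.0.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1') == 'network.block.1.0.output.conv1.weight'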
def lowerCAmelCase__ ( a__: Tuple ) -> int:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from collections.abc import Sequence
def __lowerCAmelCase ( a__ = None ) -> int:
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__a = nums[0]
for i in range(1 , len(a__ ) ):
__a = nums[i]
        __a = max(ans , ans + num , num )
return ans
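# A self-contained check of the recurrence above (ans = max(ans, ans + num, num)):
# positives accumulate, negatives are skipped, and an all-negative input returns
# its largest element.
def _max_subsequence_sum_demo(nums):
    ans = nums[0]
    for num in nums[1:]:
        ans = max(ans, ans + num, num)
    return ans

assert _max_subsequence_sum_demo([1, -2, 3, 4]) == 8
assert _max_subsequence_sum_demo([-5, -1, -8]) == -1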
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
A : Any = int(input('Enter number of elements : ').strip())
A : Dict = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array)) | 6 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = process
_UpperCAmelCase = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dataset[i]
_UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params )
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = loader
_UpperCAmelCase = infer
_UpperCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase = None
_UpperCAmelCase = loader_batch_size
# Internal bookkeeping
_UpperCAmelCase = None
_UpperCAmelCase = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is a simple tensor, just fetch the slice
_UpperCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_UpperCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
                    # This can happen for optional data that gets passed around
_UpperCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase = next(self.iterator )
_UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase = processed
_UpperCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
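# --- Illustrative sketch (added; `toy_infer` and `unroll_batches` are
# hypothetical names used for illustration only). The iterator above feeds
# batches to the model and then slices each batched output back into single
# items; this is the same idea with no dependencies:
def toy_infer(batch):
    # stand-in "model": one output per input element
    return [x * 2 for x in batch]

def unroll_batches(batches, infer):
    for batch in batches:
        processed = infer(batch)
        # mirror loader_batch_item(): emit each row of the batched output
        for item in processed:
            yield item

assert list(unroll_batches([[1, 2], [3]], toy_infer)) == [2, 4, 6]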
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
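# --- Illustrative sketch (added; `flatten` is a hypothetical name). The
# comment above describes this iterator as flattening lists of lists, but
# with generators; the same pattern in isolation:
def flatten(nested):
    for sub in nested:   # each `sub` plays the role of one subiterator
        yield from sub   # exhaust it before moving on to the next one

assert list(flatten([[1, 2], [], [3, 4, 5]])) == [1, 2, 3, 4, 5]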
class __a ( UpperCAmelCase ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
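# --- Illustrative sketch (added; hypothetical `pack` name, plain dicts
# instead of model outputs). The packing logic above accumulates items until
# one carries is_last=True, then emits the whole group:
def pack(stream):
    accumulator = []
    for item in stream:
        is_last = item.pop('is_last')
        accumulator.append(item)
        if is_last:
            yield accumulator
            accumulator = []

_stream = [{'v': 1, 'is_last': False}, {'v': 2, 'is_last': True}, {'v': 3, 'is_last': True}]
assert list(pack(_stream)) == [[{'v': 1}, {'v': 2}], [{'v': 3}]]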
class __a ( UpperCAmelCase ):
    def __init__( self , dataset , key ) -> Optional[int]:
        """simple docstring"""
        self.dataset = dataset
        self.key = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
    def __init__( self , dataset , keya , keyb ) -> List[str]:
        """simple docstring"""
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> Dict:
        """simple docstring"""
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 329 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__( self, __a, __a = 13, __a = 64, __a = 2, __a = 3, __a = 3, __a = True, __a = True, __a = 128, __a=[16, 32, 64, 128], __a = 7, __a = 4, __a = 37, __a = "gelu", __a = 0.1, __a = 0.1, __a = 10, __a = 0.02, __a = 2, __a = 1, __a = 128, __a = [2, 2, 2, 2], __a = 2, __a = 2, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Optional[int] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : int = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = type_sequence_label_size
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = encoder_stride
_lowerCAmelCase : List[str] = num_attention_outputs
_lowerCAmelCase : Union[str, Any] = embed_dim
_lowerCAmelCase : str = embed_dim + 1
_lowerCAmelCase : Optional[Any] = resolution
_lowerCAmelCase : List[str] = depths
_lowerCAmelCase : List[Any] = hidden_sizes
_lowerCAmelCase : List[str] = dim
_lowerCAmelCase : Optional[Any] = mlp_expansion_ratio
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_SCREAMING_SNAKE_CASE, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFEfficientFormerModel(config=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE, training=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = self.type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE, training=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : int = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFEfficientFormerModelTester(self)
_lowerCAmelCase : Optional[Any] = ConfigTester(
self, config_class=_SCREAMING_SNAKE_CASE, has_text_modality=_SCREAMING_SNAKE_CASE, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : int = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], _SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
def check_hidden_states_output(__a, __a, __a):
_lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE), training=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_SCREAMING_SNAKE_CASE), _SCREAMING_SNAKE_CASE)
if hasattr(self.model_tester, "encoder_seq_length"):
_lowerCAmelCase : Union[str, Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
_lowerCAmelCase : Tuple = seq_length * self.model_tester.chunk_length
else:
_lowerCAmelCase : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
_lowerCAmelCase : Dict = outputs.decoder_hidden_states
                self.assertIsInstance(_SCREAMING_SNAKE_CASE, (list, tuple))
self.assertEqual(len(_SCREAMING_SNAKE_CASE), _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = getattr(self.model_tester, "seq_length", _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = getattr(self.model_tester, "decoder_seq_length", _SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], )
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : int = super()._prepare_for_class(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, return_labels=_SCREAMING_SNAKE_CASE)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = TFEfficientFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : int = getattr(self.model_tester, "seq_length", _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Union[str, Any] = getattr(self.model_tester, "encoder_seq_length", _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Optional[int] = getattr(self.model_tester, "key_length", _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = getattr(self.model_tester, "chunk_length", _SCREAMING_SNAKE_CASE)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
_lowerCAmelCase : Union[str, Any] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_lowerCAmelCase : int = True
_lowerCAmelCase : str = False
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE), training=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE), self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE), training=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE), self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_lowerCAmelCase : int = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=_SCREAMING_SNAKE_CASE)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.assertTrue(outputs_dict is not None)
def prepare_img( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
if is_vision_available()
else None
)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
_lowerCAmelCase : Union[str, Any] = self.default_image_processor
_lowerCAmelCase : Tuple = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE, return_tensors="tf")
# forward pass
_lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE, training=_SCREAMING_SNAKE_CASE)
# verify the logits
_lowerCAmelCase : Any = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3], _SCREAMING_SNAKE_CASE, atol=1E-4))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300")
_lowerCAmelCase : Union[str, Any] = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE, return_tensors="tf")
# forward pass
_lowerCAmelCase : List[Any] = model(**_SCREAMING_SNAKE_CASE, training=_SCREAMING_SNAKE_CASE)
# verify the logits
_lowerCAmelCase : Union[str, Any] = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Optional[Any] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3], _SCREAMING_SNAKE_CASE, atol=1E-4))
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class __a ( UpperCAmelCase ):
_a : str = 'data2vec-text'
def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class __a ( UpperCAmelCase ):
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
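# --- Illustrative note (added; values restated from the property above,
# nothing new): the ONNX config resolves to three dynamic axes per input for
# the multiple-choice task and two otherwise.
_default_axes = OrderedDict(
    [
        ('input_ids', {0: 'batch', 1: 'sequence'}),
        ('attention_mask', {0: 'batch', 1: 'sequence'}),
    ]
)
_mc_axes = OrderedDict(
    [
        ('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
        ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'}),
    ]
)
assert list(_default_axes) == ['input_ids', 'attention_mask']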
| 329 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ['image_processor', 'tokenizer']
lowerCAmelCase__ = 'BlipImageProcessor'
lowerCAmelCase__ = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self : Dict , image_processor : Optional[int] , tokenizer : Optional[int] ) -> List[str]:
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : int = True , __SCREAMING_SNAKE_CASE : str = False , __SCREAMING_SNAKE_CASE : List[str] = None , __SCREAMING_SNAKE_CASE : List[str] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Dict = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : Tuple = False , __SCREAMING_SNAKE_CASE : int = False , __SCREAMING_SNAKE_CASE : Any = False , __SCREAMING_SNAKE_CASE : int = False , __SCREAMING_SNAKE_CASE : Tuple = False , __SCREAMING_SNAKE_CASE : Optional[Any] = True , __SCREAMING_SNAKE_CASE : List[str] = None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
__SCREAMING_SNAKE_CASE = self.tokenizer
__SCREAMING_SNAKE_CASE = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = None
if text_encoding is not None:
encoding_image_processor.update(_SCREAMING_SNAKE_CASE )
return encoding_image_processor
def UpperCAmelCase__ ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
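# --- Illustrative sketch (added; `toy_processor_call` is a hypothetical name,
# and the stand-in values are made up for illustration). A dependency-free
# restatement of the control flow in __call__ above: tokenizer output is
# merged into the image-processor output when both modalities are provided.
def toy_processor_call(images=None, text=None):
    if images is None and text is None:
        raise ValueError("You have to specify either images or text.")
    if images is None:
        return {"input_ids": [101, 102]}        # stand-in tokenizer encoding
    encoding = {"pixel_values": [[0.0]]}        # stand-in image features
    if text is not None:
        encoding.update({"input_ids": [101, 102]})  # merge, like .update() above
    return encoding

assert set(toy_processor_call(images="img", text="hi")) == {"pixel_values", "input_ids"}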
| 267 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            # NaN is the only value for which `t != t` holds, so this zeroes NaNs
            t[t != t] = 0
            return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class __a ( unittest.TestCase , UpperCAmelCase ):
_a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_a : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
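# --- Illustrative sketch (added; `recursive_equal` is a hypothetical name).
# The recursive tuple-vs-dict equivalence check used in the test above,
# restated without torch:
def recursive_equal(tuple_obj, dict_obj):
    if isinstance(tuple_obj, (list, tuple)):
        return all(recursive_equal(t, d) for t, d in zip(tuple_obj, dict_obj))
    if isinstance(tuple_obj, dict):
        return all(recursive_equal(t, d) for t, d in zip(tuple_obj.values(), dict_obj.values()))
    return tuple_obj == dict_obj

assert recursive_equal((1, [2, 3]), (1, [2, 3]))
assert not recursive_equal((1, [2, 3]), (1, [2, 4]))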
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Any = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
snake_case__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
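# --- Illustrative sketch (added; `ToyLazyModule` is a hypothetical name used
# for illustration only). The lazy-import idea behind _LazyModule above, in
# miniature: attribute access triggers the real import, so importing the
# package itself stays cheap.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name):
        # only runs when normal lookup fails, i.e. on first access
        module = importlib.import_module(self._class_to_module[name])
        return getattr(module, name)

_lazy = ToyLazyModule('lazy_math', {'math': ['sqrt']})
assert _lazy.sqrt(9) == 3.0  # `math` is imported only at this point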
| 117 |
from collections.abc import Generator
def fibonacci_generator( ) -> Generator[int, None, None]:
    '''simple docstring'''
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( a__: int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    answer = 1
    fib_gen = fibonacci_generator()
    while len(str(next(fib_gen ) ) ) < a__:
        answer += 1
    return answer + 1
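# Quick sanity check (added): the first Fibonacci term with 3 digits is the
# 12th (F(12) = 144), so solution(3) should return 12 (Project Euler 25).
assert solution(3) == 12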
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 329 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Optional[int] = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : Tuple = 'time_series_transformer'
UpperCAmelCase__ : str = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self :List[str] , _A :List[str] = None , _A :str = None , _A :Dict = "student_t" , _A :Optional[int] = "nll" , _A :Tuple = 1 , _A :Union[str, Any] = [1, 2, 3, 4, 5, 6, 7] , _A :Tuple = "mean" , _A :List[str] = 0 , _A :Union[str, Any] = 0 , _A :str = 0 , _A :List[str] = 0 , _A :Optional[Any] = None , _A :List[str] = None , _A :Optional[int] = 32 , _A :str = 32 , _A :Union[str, Any] = 2 , _A :Any = 2 , _A :List[str] = 2 , _A :str = 2 , _A :Any = True , _A :Dict = "gelu" , _A :Union[str, Any] = 64 , _A :List[str] = 0.1 , _A :Any = 0.1 , _A :str = 0.1 , _A :int = 0.1 , _A :Optional[Any] = 0.1 , _A :Dict = 100 , _A :int = 0.02 , _A :str=True , **_A :Optional[int] , ) -> List[Any]:
'''simple docstring'''
__A = prediction_length
__A = context_length or prediction_length
__A = distribution_output
__A = loss
__A = input_size
__A = num_time_features
__A = lags_sequence
__A = scaling
__A = num_dynamic_real_features
__A = num_static_real_features
__A = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__A = cardinality
else:
__A = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__A = embedding_dimension
else:
__A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__A = num_parallel_samples
# Transformer architecture configuration
__A = input_size * len(_SCREAMING_SNAKE_CASE ) + self._number_of_features
__A = d_model
__A = encoder_attention_heads
__A = decoder_attention_heads
__A = encoder_ffn_dim
__A = decoder_ffn_dim
__A = encoder_layers
__A = decoder_layers
__A = dropout
__A = attention_dropout
__A = activation_dropout
__A = encoder_layerdrop
__A = decoder_layerdrop
__A = activation_function
__A = init_std
__A = use_cache
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def lowercase_ ( self :List[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
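# --- Worked example (added; the numbers are hypothetical, chosen for
# illustration only). Feature-count arithmetic from `_number_of_features`
# above: with embedding_dimension=[2, 3], 1 dynamic real feature, 2 time
# features, 0 static real features and input_size=1:
#   (2 + 3) + 1 + 2 + 0 + 1 * 2 = 10
# and the encoder input width computed in __init__ above is then
# input_size * len(lags_sequence) + 10.
assert sum([2, 3]) + 1 + 2 + 0 + 1 * 2 == 10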
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = AutoConfig.from_pretrained(a__ )
snake_case_ = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ )
snake_case_ = checkpoints.load_tax_checkpoint(a__ )
snake_case_ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
snake_case_ = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`"
            " attribute with a value from [\'local\', \'transient-global\']." )
# Encoder
    for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index )}"
        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index )]["layer"]
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_global_layer_norm
        if split_mlp_wi:
            snake_case_ = tax_mlp_wi_0
            snake_case_ = tax_mlp_wi_1
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
snake_case_ = tax_mlp_layer_norm
snake_case_ = flax_model_encoder_layer_block
# Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    snake_case_ = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        snake_case_ = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    snake_case_ = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index )}"
        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index )]["layer"]
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_pre_attention_layer_norm
snake_case_ = tax_enc_dec_attention_key
snake_case_ = tax_enc_dec_attention_out
snake_case_ = tax_enc_dec_attention_query
snake_case_ = tax_enc_dec_attention_value
snake_case_ = tax_cross_layer_norm
        if split_mlp_wi:
            snake_case_ = tax_mlp_wi_0
            snake_case_ = tax_mlp_wi_1
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
        snake_case_ = tax_mlp_layer_norm
        snake_case_ = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    snake_case_ = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    snake_case_ = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    snake_case_ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path )
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
lowercase__ : List[str] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
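# Example invocation (a sketch; the checkpoint path and config name below are
# illustrative placeholders, not files shipped with this script):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/output_folder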
| 187 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DonutImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_thumbnail' ) )
        self.assertTrue(hasattr(image_processing , 'do_align_long_axis' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
    @is_flaky()
    def test_call_pil( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    @is_flaky()
    def test_call_numpy( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    @is_flaky()
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
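# Minimal usage sketch for the processor under test (hedged: the checkpoint name
# below is an illustrative assumption, not something this test file depends on):
#
#   from transformers import DonutImageProcessor
#   image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
#   pixel_values = image_processor(images=image, return_tensors="pt").pixel_values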
| 329 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    '''simple docstring'''
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ) ->None:
        """simple docstring"""
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
        self.post_init()
    def post_init( self ) ->None:
        """simple docstring"""
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('''llm_int8_threshold must be a float''' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('''bnb_4bit_quant_type must be a string''' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
            '''0.39.0''' ):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
    def is_quantizable( self ) ->bool:
        """simple docstring"""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
        """simple docstring"""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        """simple docstring"""
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ) ->None:
        """simple docstring"""
        with open(json_file_path , '''w''' , encoding='''utf-8''' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '''\n'''
            writer.write(json_string )
    def to_dict( self ) ->Dict[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
        return output
    def __repr__( self ):
        """simple docstring"""
        return F"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string( self , use_diff: bool = True ) ->str:
        """simple docstring"""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ) ->Dict[str, Any]:
        """simple docstring"""
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
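# Usage sketch (illustrative values; assumes bitsandbytes>=0.39.0 is installed):
#
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16"
#   )
#   assert quant_config.quantization_method() == "nf4"
#   print(quant_config.to_json_string())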
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider(self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
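    # Note: onnxruntime interprets "gpu_mem_limit" in bytes, so the value above
    # caps the CUDA memory arena at roughly 15 GB.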
@property
    def gpu_options(self ):
"""simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference(self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
| 227 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
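# Worked example (illustrative): quote() percent-encodes the filename, so
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv")
# yields
#   "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"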
| 329 | 0 |
'''simple docstring'''
ERROR_MSG = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id( spanish_id : str ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id, str ):
        msg = F"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''', '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(ERROR_MSG )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG ) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG )
    return letter == LOOKUP_LETTERS[number % 23]
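# Worked example: for "12345678Z", 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z',
# so the ID is accepted; any other trailing letter would be rejected.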
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
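# Usage sketch: dep_version_check("tqdm") re-checks the installed tqdm against the
# pinned range in deps["tqdm"], raising if the requirement is not satisfied.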
| 329 | 0 |
from collections.abc import Generator
def fibonacci_generator( ) -> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n = 10_00 ) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
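# Worked example: solution(3) == 12, because 144 is the 12th Fibonacci number and
# the first one with three digits.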
| 280 |
from __future__ import annotations
def depth_first_search( graph: dict , start: str ) -> set[str]:
    '''simple docstring'''
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
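    # Expected output: {'A', 'B', 'C', 'D', 'E', 'F', 'G'} - every vertex of G is
    # reachable from 'A', so the traversal explores the whole graph.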
| 329 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset ,expected_features ):
    assert isinstance(dataset ,Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def test_dataset_from_parquet_keep_in_memory( keep_in_memory ,parquet_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset ,expected_features )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def test_dataset_from_parquet_features( features ,parquet_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path ,features=features ,cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset ,expected_features )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_parquet_split( split ,parquet_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(parquet_path ,cache_dir=cache_dir ,split=split ).read()
    _check_parquet_dataset(dataset ,expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' ,[str, list] )
def test_dataset_from_parquet_path_type( path_type ,parquet_path ,tmp_path ):
    if issubclass(path_type ,str ):
        path = parquet_path
    elif issubclass(path_type ,list ):
        path = [parquet_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path ,cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset ,expected_features )
def _check_parquet_datasetdict( dataset_dict ,expected_features ,splits=("train",) ):
    assert isinstance(dataset_dict ,DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def test_parquet_datasetdict_reader_keep_in_memory( keep_in_memory ,parquet_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'train': parquet_path} ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset ,expected_features )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def test_parquet_datasetdict_reader_features( features ,parquet_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({'train': parquet_path} ,features=features ,cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset ,expected_features )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def test_parquet_datasetdict_reader_split( split ,parquet_path ,tmp_path ):
    if split:
        path = {split: parquet_path}
    else:
        split = 'train'
        path = {'train': parquet_path, 'test': parquet_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path ,cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset ,expected_features ,splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write( dataset ,tmp_path ):
    writer = ParquetDatasetWriter(dataset ,tmp_path / 'foo.parquet' )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet' )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features( shared_datadir ,tmp_path ):
    image_path = str(shared_datadir / 'test_image_rgb.jpg' )
    data = {'image': [image_path]}
    features = Features({'image': Image()} )
    dataset = Dataset.from_dict(data ,features=features )
    writer = ParquetDatasetWriter(dataset ,tmp_path / 'foo.parquet' )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) ,streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' ,[
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def test_get_writer_batch_size( feature ,expected ):
    assert get_writer_batch_size(feature ) == expected
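# Round-trip sketch using the readers/writers exercised above (the output path is
# illustrative):
#
#   dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(dataset, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()
#   assert reloaded.column_names == dataset.column_names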
| 208 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
    def test_download_only_pytorch( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , 'snapshots' ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
    def test_dummy_all_tpus( self ):
"""simple docstring"""
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
    def test_stable_diffusion_v1_4( self ):
"""simple docstring"""
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16( self ):
"""simple docstring"""
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety( self ):
"""simple docstring"""
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim( self ):
"""simple docstring"""
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=False , steps_offset=1 , )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
    def test_jax_memory_efficient_attention( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=None , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=None , use_memory_efficient_attention=True , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
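    # Note on the data-parallel pattern used throughout: `replicate` copies the
    # params onto every local device, `shard` splits each input along its leading
    # axis, and jit=True dispatches one shard per device (pmap under the hood).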
| 329 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A : Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check( pkg , hint=None ):
    require_version(deps[pkg] , hint )
| 6 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ :int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        """simple docstring"""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        """simple docstring"""
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    'Note also that one single text can be provided for conditional image to text generation.' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'input_ids': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'] , list )
            and all(x is None for x in model_inputs['input_ids'] )
        ):
            model_inputs['input_ids'] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
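# Usage sketch (the checkpoint name is an illustrative assumption):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': '...'}]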
| 329 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
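# Tiny randomly-initialized checkpoints keep these layer-copying tests fast; no
# pretrained quality is assumed.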
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    @cached_property
    def teacher_config(self):
        '''simple docstring'''
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        '''simple docstring'''
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        '''simple docstring'''
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        '''simple docstring'''
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        '''simple docstring'''
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        '''simple docstring'''
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 36 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ) -> Union[str, Any]:
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
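# Usage sketch: retire a keyword argument while still honouring it when passed
# (the function name and argument below are hypothetical):
#
#   def resize(image, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Pass the size directly instead.", take_from=kwargs)
#       ...
#
# If the caller passed `scale`, its value is popped and returned, and a
# FutureWarning is emitted; otherwise None comes back.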
| 329 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome( n ):
    """simple docstring"""
    n = str(n )
    return n == n[::-1]
def solution( n = 1_00_00_00 ):
    """simple docstring"""
    total = 0
    for i in range(1 , n ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
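# Worked example: 585 is counted because it reads the same both ways in base 10
# and in base 2 (0b1001001001).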
| 267 |
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( num_picked: int = 2_0 ) -> str:
    '''simple docstring'''
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
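# Derivation note: by linearity of expectation each of the 7 colours appears with
# probability 1 - C(60, 20) / C(70, 20), so the expected number of distinct
# colours is 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802.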
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Dict = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a__ : List[Any] = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> int:
"""simple docstring"""
__A = torch.load(a__ , map_location='cpu' )
__A = checkpoint['Downstream']
__A = UniSpeechSatConfig.from_pretrained(a__ )
__A = WavaVecaFeatureExtractor.from_pretrained(
a__ , return_attention_mask=a__ , do_normalize=a__ )
__A = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__A = convert_classification(a__ , a__ , a__ )
elif arch.endswith('ForAudioFrameClassification' ):
__A = convert_diarization(a__ , a__ , a__ )
elif arch.endswith('ForXVector' ):
__A = convert_xvector(a__ , a__ , a__ )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__A = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a__ : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
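# Example invocation of the converter above (all paths and the hub id are
# hypothetical placeholders; substitute your own S3PRL checkpoint and config):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model
#
# The dispatcher keys off hf_config.architectures[0], so the config must name an
# architecture ending in ForSequenceClassification, ForAudioFrameClassification,
# or ForXVector; anything else raises NotImplementedError.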
| 161 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoConfig.from_pretrained(a__ )
_UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ )
_UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ )
_UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = txa_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale']
_UpperCAmelCase = txa_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
_UpperCAmelCase = tax_model['target']['token_embedder']['embedding']
_UpperCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a__ )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
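# Example invocation of the converter above (paths are hypothetical; the config
# name must identify a T5/LongT5 config compatible with the T5X weights):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path ./t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model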
| 329 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = 'new-model'
if is_tf_available():
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = NewModelConfig
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = "bert-base-cased"
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = "bert-base-cased"
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : Dict ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def snake_case__ ( self : List[str] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_probability
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
snake_case_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModelForTableQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = copy.deepcopy(model.config )
snake_case_ = ["FunnelBaseModel"]
snake_case_ = TFAutoModel.from_config(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Dict ):
"""simple docstring"""
try:
AutoConfig.register("new-model" , _SCREAMING_SNAKE_CASE )
snake_case_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
auto_class.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
auto_class.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
auto_class.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case_ = BertModelTester(self ).get_config()
snake_case_ = NewModelConfig(**tiny_config.to_dict() )
snake_case_ = auto_class.from_config(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = auto_class.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def snake_case__ ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , "bert-base is not a local folder and is not a valid model identifier" ):
snake_case_ = TFAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
snake_case_ = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , revision="aaaaaa" )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
snake_case_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , "Use `from_pt=True` to load this model" ):
snake_case_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
snake_case_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
snake_case_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
snake_case_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
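# The register/cleanup test above exercises this public flow; a reduced sketch with
# made-up class names (the real test also registers TF model classes, omitted here):
from transformers import AutoConfig, PretrainedConfig
class MyConfig(PretrainedConfig):
    model_type = "my-model"
AutoConfig.register("my-model", MyConfig)
sketch_config = AutoConfig.for_model("my-model")  # resolves through the registry
assert isinstance(sketch_config, MyConfig)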
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : str = 'ctrl'
_a : Tuple = ['past_key_values']
_a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=246534 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=48 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = dff
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
super().__init__(**_SCREAMING_SNAKE_CASE )
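# Round-trip sketch for the config class above; it mirrors transformers.CTRLConfig,
# so the equivalent public-API usage looks like this (directory name is arbitrary):
from transformers import CTRLConfig
sketch = CTRLConfig(n_layer=2, n_head=4)
sketch.save_pretrained("./ctrl-tiny")              # writes ./ctrl-tiny/config.json
reloaded = CTRLConfig.from_pretrained("./ctrl-tiny")
assert reloaded.num_hidden_layers == 2             # attribute_map aliases n_layer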
| 329 | 0 |
def _a ( a :list[int] , a :list[int] , a :int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(a__ ) )
def _a ( a :list[list[int]] , a :int , a :list[int] , a :int ) -> bool:
if index == len(a__ ):
return True
# Recursive Step
for i in range(a__ ):
if valid_coloring(graph[index] , a__ , a__ ):
# Color current vertex
a = i
# Validate coloring
if util_color(a__ , a__ , a__ , index + 1 ):
return True
# Backtrack
a = -1
return False
def _a ( a :list[list[int]] , a :int ) -> list[int]:
a = [-1] * len(a__ )
if util_color(a__ , a__ , a__ , 0 ):
return colored_vertices
return []
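# Usage sketch for the m-coloring backtracker above. The call sites show the three
# functions are meant to be named valid_coloring, util_color, and color; assuming
# those names are restored, a 4-cycle (bipartite) is 2-colorable but not 1-colorable:
#
#   adjacency = [
#       [0, 1, 0, 1],
#       [1, 0, 1, 0],
#       [0, 1, 0, 1],
#       [1, 0, 1, 0],
#   ]
#   color(adjacency, 2)  # -> [0, 1, 0, 1]
#   color(adjacency, 1)  # -> [] (an edge needs two colors)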
| 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : str = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
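# Standalone sketch of the shortest-edge resize rule that get_expected_values above
# recomputes (like the test helper, this ignores the longest_edge cap):
def expected_hw(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
assert expected_hw(400, 200) == (36, 18)   # portrait: width pinned to shortest edge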
| 329 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowercase: Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = ['input_values', 'attention_mask']
def __init__(self , lowerCamelCase_ = 1 , lowerCamelCase_ = 16000 , lowerCamelCase_ = 0.0 , lowerCamelCase_ = False , lowerCamelCase_ = 80 , lowerCamelCase_ = 16 , lowerCamelCase_ = 64 , lowerCamelCase_ = "hann_window" , lowerCamelCase_ = 1.0 , lowerCamelCase_ = 80 , lowerCamelCase_ = 7600 , lowerCamelCase_ = 1E-1_0 , lowerCamelCase_ = 2 , lowerCamelCase_ = True , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a = do_normalize
a = return_attention_mask
a = num_mel_bins
a = hop_length
a = win_length
a = win_function
a = frame_signal_scale
a = fmin
a = fmax
a = mel_floor
a = reduction_factor
a = win_length * sampling_rate // 1000
a = hop_length * sampling_rate // 1000
a = optimal_fft_length(self.sample_size )
a = (self.n_fft // 2) + 1
a = window_function(window_length=self.sample_size , name=self.win_function , periodic=_SCREAMING_SNAKE_CASE )
a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , _SCREAMING_SNAKE_CASE , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , _SCREAMING_SNAKE_CASE , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
a = np.array(_SCREAMING_SNAKE_CASE , np.intaa )
a = []
for vector, length in zip(_SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
a = padding_value
normed_input_values.append(_SCREAMING_SNAKE_CASE )
else:
a = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def UpperCamelCase_ (self , lowerCamelCase_ , ):
"""simple docstring"""
a = spectrogram(
_SCREAMING_SNAKE_CASE , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__(self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a = self._process_audio(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
a = None
if audio_target is not None:
a = self._process_audio(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if inputs is None:
return inputs_target
else:
a = inputs_target["input_values"]
a = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a = decoder_attention_mask
return inputs
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
a = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
a = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a = speech.astype(np.floataa )
# always return batch
if not is_batched:
a = [speech]
# needed to make pad() work on spectrogram inputs
a = self.feature_size
# convert into correct format for padding
if is_target:
a = [self._extract_mel_features(_SCREAMING_SNAKE_CASE ) for waveform in speech]
a = BatchFeature({"input_values": features} )
a = self.num_mel_bins
else:
a = BatchFeature({"input_values": speech} )
a = self.pad(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
a = feature_size_hack
# convert input values to correct format
a = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a = input_values.astype(np.floataa )
# convert attention_mask to correct format
a = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a = (
attention_mask
if self._get_padding_strategies(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=_SCREAMING_SNAKE_CASE , padding_value=self.padding_value )
if return_tensors is not None:
a = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = super().to_dict()
# Don't serialize these as they are derived from the other properties.
a = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
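# Toy walkthrough of the zero_mean_unit_var_norm step above: statistics come from
# the unpadded prefix only, and padded positions are overwritten with padding_value.
import numpy as np
vector = np.array([1.0, 2.0, 3.0, 0.0])    # last element is padding
length = 3                                 # attention_mask.sum(-1) for this row
normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
normed[length:] = 0.0                      # padding_value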
| 227 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
_a : List[str] = JukeboxTokenizer
_a : List[Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 0 |
'''simple docstring'''
from __future__ import annotations
def a__ ( lowercase : int = 4 ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = abs(a__ ) or 4
return [[1 + x + y * row_size for x in range(a__ )] for y in range(a__ )]
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(a__ ) )
# OR.. transpose(reverse_column(matrix))
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(a__ ) )
# OR.. reverse_column(reverse_row(matrix))
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(a__ ) )
# OR.. transpose(reverse_row(matrix))
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = [list(a__ ) for x in zip(*a__ )]
return matrix
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = matrix[::-1]
return matrix
def a__ ( lowercase : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = [x[::-1] for x in matrix]
return matrix
def a__ ( lowercase : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*a__ )
if __name__ == "__main__":
lowercase__ : Optional[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
lowercase__ : Union[str, Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
lowercase__ : List[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
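# Sanity sketch, independent of the obfuscated helpers above: composing the 90°
# counterclockwise rotation reverse_row(transpose(m)) four times is the identity.
def rot90_ccw(matrix):
    return [list(row) for row in zip(*matrix)][::-1]
sample = [[1, 2], [3, 4]]
rotated = sample
for _ in range(4):
    rotated = rot90_ccw(rotated)
assert rotated == sample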
| 324 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
        '--dataset_name' , type=a__ , default='wikitext' , help='Name of the dataset to train on. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
_UpperCAmelCase = parser.parse_args()
return args
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
def fn(a__: str ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase__ ( a__: List[str] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(tokenized_data['input_ids'] ) ):
_UpperCAmelCase = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_UpperCAmelCase = tf.train.Features(feature=a__ )
_UpperCAmelCase = tf.train.Example(features=a__ )
_UpperCAmelCase = example.SerializeToString()
records.append(a__ )
return records
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_UpperCAmelCase = min(len(a__ ) , args.limit )
_UpperCAmelCase = dataset.select(range(a__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_UpperCAmelCase = tokenize_function(a__ )
_UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a__: Optional[int] ):
# Concatenate all texts.
_UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
_UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_UpperCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_UpperCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for shard in range(0 , len(a__ ) , args.shard_size ):
_UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size]
_UpperCAmelCase = len(dataset_snapshot['input_ids'] )
_UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_UpperCAmelCase = get_serialized_examples(a__ )
with tf.io.TFRecordWriter(a__ ) as out_file:
for i in range(len(a__ ) ):
_UpperCAmelCase = serialized_examples[i]
out_file.write(a__ )
print('Wrote file {} containing {} records'.format(a__ , a__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = parse_args()
main(args)
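# --- Illustrative sketch (not part of the original script): the chunking logic on toy data. ---
# A framework-free rerun of the concatenate-then-chunk idea in group_texts above,
# with a hypothetical max_length of 4 so the dropped remainder is easy to see.
def _example_group_texts():
    max_length = 4
    examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
    concatenated = {k: sum(v, []) for k, v in examples.items()}
    total_length = (len(concatenated["input_ids"]) // max_length) * max_length
    # {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]} -- tokens 9 and 10 are dropped.
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }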
| 329 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase : Tuple = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCAmelCase : Optional[Any] = {'''facebook/blenderbot_small-90M''': 5_12}
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , _A , _A , _A="__start__" , _A="__end__" , _A="__unk__" , _A="__null__" , **_A , ):
super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
__A : str = json.load(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle:
__A : List[str] = merges_handle.read().split('\n' )[1:-1]
__A : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
__A : List[str] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__A : List[str] = {}
@property
def UpperCAmelCase_ ( self ):
return len(self.encoder )
def UpperCAmelCase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , _A ):
if token in self.cache:
return self.cache[token]
__A : Dict = re.sub('([.,!?()])' , R' \1' , _SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = re.sub('(\')' , R' \1 ' , _SCREAMING_SNAKE_CASE )
__A : Tuple = re.sub(R'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE )
if "\n" in token:
__A : Dict = token.replace('\n' , ' __newln__' )
__A : Tuple = token.split(' ' )
__A : Optional[int] = []
for token in tokens:
if not len(_SCREAMING_SNAKE_CASE ):
continue
__A : int = token.lower()
__A : List[str] = tuple(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__A : List[str] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
words.append(_SCREAMING_SNAKE_CASE )
continue
while True:
__A : Optional[Any] = min(_SCREAMING_SNAKE_CASE , key=lambda _A : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A : List[Any] = bigram
__A : int = []
__A : Optional[int] = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
__A : int = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
new_word.extend(word[i:j] )
__A : List[str] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A : Union[str, Any] = tuple(_SCREAMING_SNAKE_CASE )
__A : Any = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
__A : int = get_pairs(_SCREAMING_SNAKE_CASE )
__A : Optional[Any] = '@@ '.join(_SCREAMING_SNAKE_CASE )
__A : List[Any] = word[:-4]
__A : List[Any] = word
words.append(_SCREAMING_SNAKE_CASE )
return " ".join(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self , _A ):
__A : Any = []
__A : Union[str, Any] = re.findall(R'\S+\n?' , _SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) )
return split_tokens
def UpperCAmelCase_ ( self , _A ):
__A : Union[str, Any] = token.lower()
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _A ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def UpperCAmelCase_ ( self , _A ):
__A : Optional[int] = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip()
return out_string
def UpperCAmelCase_ ( self , _A , _A = None ):
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__A : Any = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' )
__A : Optional[Any] = 0
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__A : Tuple = token_index
writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' )
index += 1
return vocab_file, merge_file
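# --- Illustrative sketch (not part of the original module): the core BPE merge loop. ---
# A self-contained, simplified rerun of the merge procedure implemented in the
# class above. The merge table below is made up for illustration; real models
# ship their own merges.txt.
def _example_bpe(word="hello"):
    bpe_ranks = {("h", "e"): 0, ("l", "o</w>"): 1, ("l", "l"): 2}  # hypothetical merges
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        candidates = [
            (symbols[i], symbols[i + 1])
            for i in range(len(symbols) - 1)
            if (symbols[i], symbols[i + 1]) in bpe_ranks
        ]
        if not candidates:
            break
        first, second = min(candidates, key=bpe_ranks.get)
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols  # _example_bpe("hello") -> ['he', 'l', 'lo</w>']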
| 280 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
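# --- Illustrative sketch (not part of the original tests): driving the helpers above. ---
# A minimal example of unwrap_schedule with a real optimizer/scheduler pair; the
# layer size and hyperparameters are arbitrary choices for illustration.
def _example_unwrap_linear_schedule():
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    # The LR climbs linearly to 10.0 over the two warmup steps, then decays linearly toward 0.
    return unwrap_schedule(scheduler, num_steps=10)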
@require_torch
class OptimizationTest( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
"""simple docstring"""
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    def __init__( self , fn ):
        """simple docstring"""
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
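# --- Illustrative sketch (not part of the original tests): why the wrapper exists. ---
# torch's LambdaLR only serializes lr_lambdas in state_dict() when they are callable
# objects; plain functions and lambdas are skipped. Wrapping each lr_lambda in
# LambdaScheduleWrapper therefore lets the schedule round-trip through state_dict().
# A minimal sketch; the layer size and warmup length are arbitrary.
def _example_wrap_scheduler():
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=1.0)
    scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=2)
    LambdaScheduleWrapper.wrap_scheduler(scheduler)  # lr_lambdas become wrapper objects
    state = scheduler.state_dict()
    scheduler.load_state_dict(state)
    return scheduler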
| 329 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
roberta.eval() # disable dropout
__lowerCamelCase : List[str] = roberta.model.encoder.sentence_encoder
__lowerCamelCase : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
__lowerCamelCase : int = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' ,a__ )
__lowerCamelCase : List[str] = XLMRobertaXLForSequenceClassification(a__ ) if classification_head else XLMRobertaXLForMaskedLM(a__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase : List[str] = roberta_sent_encoder.embed_tokens.weight
__lowerCamelCase : Optional[int] = roberta_sent_encoder.embed_positions.weight
__lowerCamelCase : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowerCamelCase : Dict = roberta_sent_encoder.layer_norm.weight
__lowerCamelCase : Optional[int] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase : Dict = model.roberta.encoder.layer[i]
__lowerCamelCase : Union[str, Any] = roberta_sent_encoder.layers[i]
__lowerCamelCase : Tuple = layer.attention
__lowerCamelCase : Optional[int] = roberta_layer.self_attn_layer_norm.weight
__lowerCamelCase : str = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowerCamelCase : Optional[int] = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowerCamelCase : Optional[int] = roberta_layer.self_attn.q_proj.weight
__lowerCamelCase : Union[str, Any] = roberta_layer.self_attn.q_proj.bias
__lowerCamelCase : Dict = roberta_layer.self_attn.k_proj.weight
__lowerCamelCase : Optional[int] = roberta_layer.self_attn.k_proj.bias
__lowerCamelCase : int = roberta_layer.self_attn.v_proj.weight
__lowerCamelCase : Optional[int] = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase : List[Any] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowerCamelCase : Any = roberta_layer.self_attn.out_proj.weight
__lowerCamelCase : List[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowerCamelCase : Any = roberta_layer.final_layer_norm.weight
__lowerCamelCase : str = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
__lowerCamelCase : int = roberta.model.classification_heads['mnli'].dense.weight
__lowerCamelCase : List[str] = roberta.model.classification_heads['mnli'].dense.bias
__lowerCamelCase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.weight
__lowerCamelCase : List[str] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowerCamelCase : Tuple = roberta.model.encoder.lm_head.dense.weight
__lowerCamelCase : str = roberta.model.encoder.lm_head.dense.bias
__lowerCamelCase : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
__lowerCamelCase : List[str] = roberta.model.encoder.lm_head.layer_norm.bias
__lowerCamelCase : Dict = roberta.model.encoder.lm_head.weight
__lowerCamelCase : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCamelCase : Dict = roberta.encode(a__ ).unsqueeze(0 ) # batch of size 1
__lowerCamelCase : Optional[int] = model(a__ )[0]
if classification_head:
__lowerCamelCase : int = roberta.model.classification_heads['mnli'](roberta.extract_features(a__ ) )
else:
__lowerCamelCase : Tuple = roberta.model(a__ )[0]
print(our_output.shape ,their_output.shape )
__lowerCamelCase : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__lowerCamelCase : Tuple = torch.allclose(a__ ,a__ ,atol=1E-3 )
print('Do both models output the same tensors?' ,'🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(a__ ).mkdir(parents=a__ ,exist_ok=a__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(a__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_UpperCamelCase = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
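# --- Illustrative sketch (not part of the original script): using a converted checkpoint. ---
# A minimal sketch of loading the dump produced above; the folder path is hypothetical
# and must point at the --pytorch_dump_folder_path used during conversion.
def _example_load_converted(folder="./xlm-roberta-xl-converted"):
    model = XLMRobertaXLForMaskedLM.from_pretrained(folder)
    model.eval()
    return model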
| 208 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def replace_key_with_offset( key , offset , original_name , new_name ):
    '''simple docstring'''
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
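# --- Illustrative sketch (not part of the original script): the rename in action. ---
# A worked example of replace_key_with_offset on a hypothetical checkpoint key.
def _example_replace_key_with_offset():
    # "network.2.0.mlp.fc1.weight" with an offset of 1 becomes
    # "network.block.1.0.output.conv1.weight".
    return replace_key_with_offset("network.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")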
def rename_keys( state_dict ):
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
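# --- Illustrative sketch (not part of the original script): inference with the converted model. ---
# A minimal sketch of classifying an image with the saved dump. The folder path is
# hypothetical and must point at the --pytorch_dump_folder_path used above.
def _example_poolformer_inference(folder="./poolformer-converted"):
    processor = PoolFormerImageProcessor.from_pretrained(folder)
    model = PoolFormerForImageClassification.from_pretrained(folder)
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]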
| 329 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case="train" , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="" , ) -> Dict:
'''simple docstring'''
super().__init__()
__a = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' )
__a = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ) -> List[str]:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , _snake_case ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , _SCREAMING_SNAKE_CASE ).rstrip('''\n''' )
__a = linecache.getline(str(self.tgt_file ) , _SCREAMING_SNAKE_CASE ).rstrip('''\n''' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
        __a = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        __a = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
__a = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_source_length , '''right''' )
__a = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_target_length , '''right''' )
__a = source_inputs['''input_ids'''].squeeze()
__a = target_inputs['''input_ids'''].squeeze()
__a = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _snake_case ) -> Any:
'''simple docstring'''
return [len(_SCREAMING_SNAKE_CASE ) for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
__a = torch.stack([x['''input_ids'''] for x in batch] )
__a = torch.stack([x['''attention_mask'''] for x in batch] )
__a = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
__a = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__a = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
A : str = getLogger(__name__)
def __lowerCAmelCase ( a__ ) -> Tuple:
return list(itertools.chain.from_iterable(a__ ) )
def __lowerCAmelCase ( a__ ) -> None:
__a = get_git_info()
save_json(a__ , os.path.join(a__ , '''git_log.json''' ) )
def __lowerCAmelCase ( a__ , a__ , a__=4 , **a__ ) -> List[Any]:
with open(a__ , '''w''' ) as f:
json.dump(a__ , a__ , indent=a__ , **a__ )
def __lowerCAmelCase ( a__ ) -> Tuple:
with open(a__ ) as f:
return json.load(a__ )
def __lowerCAmelCase ( ) -> List[Any]:
__a = git.Repo(search_parent_directories=a__ )
__a = {
'''repo_id''': str(a__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __lowerCAmelCase ( a__ , a__ ) -> List:
return list(map(a__ , a__ ) )
def __lowerCAmelCase ( a__ , a__ ) -> str:
with open(a__ , '''wb''' ) as f:
return pickle.dump(a__ , a__ )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def __lowerCAmelCase ( a__ ) -> int:
return model_prefix.startswith('''rag''' )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> List[str]:
__a = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__a = '''dropout_rate'''
for p in extra_params:
if getattr(a__ , a__ , a__ ):
if not hasattr(a__ , a__ ) and not hasattr(a__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(a__ ) )
delattr(a__ , a__ )
continue
__a = p if hasattr(a__ , a__ ) else equivalent_param[p]
setattr(a__ , a__ , getattr(a__ , a__ ) )
delattr(a__ , a__ )
return hparams, config | 6 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = process
_UpperCAmelCase = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dataset[i]
_UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params )
return processed
class PipelineIterator( IterableDataset ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = loader
_UpperCAmelCase = infer
_UpperCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase = None
_UpperCAmelCase = loader_batch_size
# Internal bookkeeping
_UpperCAmelCase = None
_UpperCAmelCase = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_UpperCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase = next(self.iterator )
_UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase = processed
_UpperCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class PipelineChunkIterator( PipelineIterator ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
class PipelinePackIterator( PipelineIterator ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
class KeyDataset( Dataset ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = keya
_UpperCAmelCase = keya
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 329 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_snake_case = logging.getLogger(__name__)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=a__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=a__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=a__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=a__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=a__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=a__ , type=a__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=a__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=a__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
_lowerCAmelCase : str = parser.parse_args()
return args
def A ( _lowerCamelCase ):
'''simple docstring'''
def fn(_lowerCamelCase ):
return tokenizer(examples["text"] )
return fn
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for i in range(len(tokenized_data["input_ids"] ) ):
_lowerCAmelCase : Optional[int] = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
_lowerCAmelCase : List[str] = tf.train.Features(feature=a__ )
_lowerCAmelCase : str = tf.train.Example(features=a__ )
_lowerCAmelCase : Union[str, Any] = example.SerializeToString()
records.append(a__ )
return records
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_lowerCAmelCase : Union[str, Any] = min(len(a__ ) , args.limit )
_lowerCAmelCase : Any = dataset.select(range(a__ ) )
print(F"Limiting the dataset to {args.limit} entries." )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
_lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_lowerCAmelCase : Tuple = tokenize_function(a__ )
_lowerCAmelCase : List[str] = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCamelCase ):
# Concatenate all texts.
_lowerCAmelCase : Dict = {k: sum(examples[k] , [] ) for k in examples.keys()}
_lowerCAmelCase : Optional[int] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_lowerCAmelCase : Optional[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_lowerCAmelCase : List[str] = {
k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_lowerCAmelCase : List[Any] = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_000 , num_proc=4 )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[Any] = 0
for shard in range(0 , len(a__ ) , args.shard_size ):
_lowerCAmelCase : Dict = grouped_dataset[shard : shard + args.shard_size]
_lowerCAmelCase : Dict = len(dataset_snapshot["input_ids"] )
_lowerCAmelCase : List[str] = os.path.join(a__ , F"dataset-{shard_count}-{records_containing}.tfrecord" )
_lowerCAmelCase : int = get_serialized_examples(a__ )
with tf.io.TFRecordWriter(a__ ) as out_file:
for i in range(len(a__ ) ):
_lowerCAmelCase : Optional[int] = serialized_examples[i]
out_file.write(a__ )
print("Wrote file {} containing {} records".format(a__ , a__ ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , "w" ) as f:
print(F"Total {args.split} records: {total_records}" , file=a__ )
if __name__ == "__main__":
_snake_case = parse_args()
main(args)
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig( PretrainedConfig ):
    model_type = 'data2vec-text'
def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
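# --- Illustrative sketch (not part of the original module): inspecting the ONNX axes. ---
# A minimal sketch using the two classes above (named as in the transformers source
# this file mirrors) to dump the dynamic-axis mapping for the default task.
def _example_onnx_inputs():
    config = Data2VecTextConfig()
    onnx_config = Data2VecTextOnnxConfig(config)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])
    return onnx_config.inputs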
| 329 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Any = logging.get_logger(__name__)
def a__ ( a__ , a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = original_name.split(""".""" )[0]
__SCREAMING_SNAKE_CASE = key.split(""".""" )
__SCREAMING_SNAKE_CASE = int(key_list[key_list.index(a__ ) - 2] )
__SCREAMING_SNAKE_CASE = int(key_list[key_list.index(a__ ) - 1] )
__SCREAMING_SNAKE_CASE = orig_block_num - offset
__SCREAMING_SNAKE_CASE = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
__SCREAMING_SNAKE_CASE = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
__SCREAMING_SNAKE_CASE = key[: key.find("""proj""" )]
__SCREAMING_SNAKE_CASE = key.replace(a__ , F'patch_embeddings.{total_embed_found}.' )
__SCREAMING_SNAKE_CASE = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
__SCREAMING_SNAKE_CASE = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """norm1""" , """before_norm""" )
if "norm2" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
__SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
__SCREAMING_SNAKE_CASE = key.replace("""head""" , """classifier""" )
__SCREAMING_SNAKE_CASE = value
return new_state_dict
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = PoolFormerConfig()
# set attributes based on model_name
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = model_name[-3:]
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = (1, 10_00)
# set config attributes
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if size == "s12":
__SCREAMING_SNAKE_CASE = [2, 2, 6, 2]
__SCREAMING_SNAKE_CASE = [64, 1_28, 3_20, 5_12]
__SCREAMING_SNAKE_CASE = 4.0
__SCREAMING_SNAKE_CASE = 0.9
elif size == "s24":
__SCREAMING_SNAKE_CASE = [4, 4, 12, 4]
__SCREAMING_SNAKE_CASE = [64, 1_28, 3_20, 5_12]
__SCREAMING_SNAKE_CASE = 4.0
__SCREAMING_SNAKE_CASE = 0.9
elif size == "s36":
__SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
__SCREAMING_SNAKE_CASE = [64, 1_28, 3_20, 5_12]
__SCREAMING_SNAKE_CASE = 4.0
__SCREAMING_SNAKE_CASE = 1E-6
__SCREAMING_SNAKE_CASE = 0.9
elif size == "m36":
__SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
__SCREAMING_SNAKE_CASE = [96, 1_92, 3_84, 7_68]
__SCREAMING_SNAKE_CASE = 4.0
__SCREAMING_SNAKE_CASE = 1E-6
__SCREAMING_SNAKE_CASE = 0.95
elif size == "m48":
__SCREAMING_SNAKE_CASE = [8, 8, 24, 8]
__SCREAMING_SNAKE_CASE = [96, 1_92, 3_84, 7_68]
__SCREAMING_SNAKE_CASE = 4.0
__SCREAMING_SNAKE_CASE = 1E-6
__SCREAMING_SNAKE_CASE = 0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location=torch.device("""cpu""" ) )
# rename keys
__SCREAMING_SNAKE_CASE = rename_keys(a__ )
# create HuggingFace model and load state dict
__SCREAMING_SNAKE_CASE = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
__SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=a__ )
__SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
__SCREAMING_SNAKE_CASE = model(a__ )
__SCREAMING_SNAKE_CASE = outputs.logits
# define expected logit slices for different models
if size == "s12":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
__SCREAMING_SNAKE_CASE = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCAmelCase : Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 267 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
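# e.g. with this tester's defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
# embed_dim=16): the patch grid has (32 // 2) ** 2 = 256 tokens, the two downsampling
# steps leave 256 // 4 ** 2 = 16 tokens, each of dimension 16 * 2 ** 2 = 64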
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class __a ( unittest.TestCase , UpperCAmelCase ):
_a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_a : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : int = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = 'swinv2'
lowerCAmelCase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self :List[str] , _UpperCamelCase :List[Any]=224 , _UpperCamelCase :int=4 , _UpperCamelCase :Any=3 , _UpperCamelCase :Dict=96 , _UpperCamelCase :List[Any]=[2, 2, 6, 2] , _UpperCamelCase :List[str]=[3, 6, 12, 24] , _UpperCamelCase :Union[str, Any]=7 , _UpperCamelCase :Tuple=4.0 , _UpperCamelCase :Dict=True , _UpperCamelCase :Optional[int]=0.0 , _UpperCamelCase :str=0.0 , _UpperCamelCase :int=0.1 , _UpperCamelCase :List[str]="gelu" , _UpperCamelCase :Tuple=False , _UpperCamelCase :Union[str, Any]=0.0_2 , _UpperCamelCase :Dict=1e-5 , _UpperCamelCase :int=32 , **_UpperCamelCase :str , )-> Tuple:
super().__init__(**_SCREAMING_SNAKE_CASE )
__A = image_size
__A = patch_size
__A = num_channels
__A = embed_dim
__A = depths
__A = len(_SCREAMING_SNAKE_CASE )
__A = num_heads
__A = window_size
__A = mlp_ratio
__A = qkv_bias
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = drop_path_rate
__A = hidden_act
__A = use_absolute_embeddings
__A = layer_norm_eps
__A = initializer_range
__A = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
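# e.g. the defaults above (embed_dim=96, four stages) give int(96 * 2 ** 3) = 768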
__A = (0, 0, 0, 0)
| 117 |
from collections.abc import Generator
def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while True:
_UpperCAmelCase , _UpperCAmelCase = b, a + b
yield b
def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = fibonacci_generator()
while len(str(next(a__ ) ) ) < n:
answer += 1
return answer + 1
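# Quick check: solution(3) == 12, since 144 (the 12th Fibonacci number) is the first
# term of the sequence with three digits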
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 329 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
((__A) , (__A)) = extended_euclid(a__ , a % b )
__A = a // b
return (y, x - k * y)
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> int:
"""simple docstring"""
((__A) , (__A)) = extended_euclid(a__ , a__ )
__A = na * na
__A = ra * x * na + ra * y * na
return (n % m + m) % m
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> int:
"""simple docstring"""
((__A) , (__A)) = extended_euclid(a__ , a__ )
if b < 0:
__A = (b % n + n) % n
return b
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> int:
"""simple docstring"""
__A , __A = invert_modulo(a__ , a__ ), invert_modulo(a__ , a__ )
__A = na * na
__A = ra * x * na + ra * y * na
return (n % m + m) % m
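# Worked example: chinese_remainder_theorem(5, 1, 7, 3) == 31 and
# chinese_remainder_theorem2(5, 1, 7, 3) == 31, the unique residue modulo 35 with
# 31 % 5 == 1 and 31 % 7 == 3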
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , __lowercase : Union[str, Any] , __lowercase : List[str]=7 , __lowercase : str=3 , __lowercase : Dict=18 , __lowercase : str=30 , __lowercase : Any=4_00 , __lowercase : Tuple=True , __lowercase : int=None , __lowercase : Any=True , __lowercase : Any=None , __lowercase : List[Any]=True , ):
"""simple docstring"""
snake_case_ = size if size is not None else {"shortest_edge": 20}
snake_case_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_flip_channel_order
def snake_case__ ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = MobileViTImageProcessingTester(self )
@property
def snake_case__ ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_flip_channel_order" ) )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 187 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 329 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _a ( a :int ) -> List[str]:
def is_in_circle(a :float , a :float ) -> bool:
a = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(a__ ) )
# The circle's area over the square's area is pi / 4, so pi is about 4 * proportion.
a = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _a ( a :int , a :Callable[[float], float] , a :float = 0.0 , a :float = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(a__ , a__ ) ) for _ in range(a__ ) ) * (max_value - min_value)
def _a ( a :int , a :float = 0.0 , a :float = 1.0 ) -> None:
def identity_function(a :float ) -> float:
return x
a = area_under_curve_estimator(
a__ , a__ , a__ , a__ )
a = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print('''******************''' )
def _a ( a :int ) -> None:
def function_to_integrate(a :float ) -> float:
return sqrt(4.0 - x * x )
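# sqrt(4 - x**2) traces the upper quarter of a circle of radius 2, whose area is
# (pi * 2 ** 2) / 4 = pi, so integrating it over [0, 2] recovers pi exactly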
a = area_under_curve_estimator(
a__ , a__ , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ = "▁" , lowerCamelCase_ = True , lowerCamelCase_ = "<unk>" , lowerCamelCase_ = "</s>" , lowerCamelCase_ = "<pad>" , ):
"""simple docstring"""
a = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
a = token_dict["token"]
a = Tokenizer(Unigram() )
a = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
a = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE ),
pre_tokenizers.Digits(individual_digits=_SCREAMING_SNAKE_CASE ),
pre_tokenizers.Punctuation(),
] )
a = decoders.Metaspace(replacement=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
a = TemplateProcessing(
single=F'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = 8000 , lowerCamelCase_ = True , ):
"""simple docstring"""
a = trainers.UnigramTrainer(
vocab_size=_SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=_SCREAMING_SNAKE_CASE , )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a = [files]
self._tokenizer.train(_SCREAMING_SNAKE_CASE , trainer=_SCREAMING_SNAKE_CASE )
self.add_unk_id()
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = 8000 , lowerCamelCase_ = True , ):
"""simple docstring"""
a = trainers.UnigramTrainer(
vocab_size=_SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=_SCREAMING_SNAKE_CASE , )
self._tokenizer.train_from_iterator(_SCREAMING_SNAKE_CASE , trainer=_SCREAMING_SNAKE_CASE )
self.add_unk_id()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = json.loads(self._tokenizer.to_str() )
a = self.special_tokens["unk"]["id"]
a = Tokenizer.from_str(json.dumps(_SCREAMING_SNAKE_CASE ) )
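# Sketch of the intended usage (hedged: the class above is obfuscated to `_lowercase`,
# but it behaves like a SentencePieceUnigramTokenizer):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(["corpus.txt"], vocab_size=8000)        # from text files, or
#   tokenizer.train_from_iterator(lines, vocab_size=8000)   # from in-memory strings
# Both paths finish with add_unk_id(), which wires the <unk> token id into the
# trained Unigram model.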
| 227 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
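# For example:
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"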
| 329 | 0 |
'''simple docstring'''
import math
lowercase__ : Optional[int] = 10
lowercase__ : Optional[Any] = 7
lowercase__ : Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a__ ( lowercase : int = 20 ) -> str:
"""simple docstring"""
_UpperCamelCase = math.comb(a__, a__ )
_UpperCamelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR, a__ )
_UpperCamelCase = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 324 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate becomes a
# hard dep of Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
'''simple docstring'''
require_version(deps[pkg] , a__ )
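# In the transformers source this helper is named `dep_version_check`; for example,
# dep_version_check("tokenizers") looks up the pinned requirement string in `deps`
# and raises if the installed tokenizers version does not satisfy it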
| 329 | 0 |
from __future__ import annotations
from typing import TypedDict
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str
UpperCamelCase : int
def _SCREAMING_SNAKE_CASE ( a ) -> list[str]:
if not isinstance(a__ , a__ ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(a__ ) )]
def _SCREAMING_SNAKE_CASE ( a ) -> BWTTransformDict:
if not isinstance(a__ , a__ ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
__A : Tuple = all_rotations(a__ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
__A : Optional[Any] = {
'bwt_string': ''.join([word[-1] for word in rotations] ),
'idx_original_string': rotations.index(a__ ),
}
return response
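# >>> bwt_transform("banana")
# {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
# (the sorted rotations are abanan, anaban, ananab, banana, nabana, nanaba, so the
# last column reads "nnbaaa" and the original string sits at index 3)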
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
if not isinstance(a__ , a__ ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
__A : Optional[Any] = int(a__ )
except ValueError:
raise TypeError(
'The parameter idx_original_string type must be int or passive'
' of cast to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(a__ ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
__A : Tuple = [''] * len(a__ )
for _ in range(len(a__ ) ):
for i in range(len(a__ ) ):
__A : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
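# Round trip of the example above: reverse_bwt("nnbaaa", 3) == "banana"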
if __name__ == "__main__":
UpperCAmelCase : int = '''Provide a string that I will generate its BWT transform: '''
UpperCAmelCase : int = input(entry_msg).strip()
UpperCAmelCase : Any = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string \'{s}\' results """
F"""in \'{result["bwt_string"]}\'"""
)
UpperCAmelCase : List[Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"""Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' """
F"""we get original string \'{original_string}\'"""
)
| 280 |
from __future__ import annotations
def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
while stack:
_UpperCAmelCase = stack.pop()
explored.add(a__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(a__ )
return explored
lowerCAmelCase__ :Tuple = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
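# depth_first_search(G, "A") reaches every vertex of this graph, so the call below
# prints the full set {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (set ordering is arbitrary)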
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 329 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Dict:
__lowerCamelCase : Dict = RobertaPreLayerNormConfig.from_pretrained(
a__ ,architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__lowerCamelCase : List[Any] = torch.load(hf_hub_download(repo_id=a__ ,filename='pytorch_model.bin' ) )
__lowerCamelCase : List[Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__lowerCamelCase : str = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__lowerCamelCase : int = tensor_value
__lowerCamelCase : Tuple = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=a__ ,config=a__ ,state_dict=a__ )
model.save_pretrained(a__ )
# convert tokenizer
__lowerCamelCase : str = AutoTokenizer.from_pretrained(a__ )
tokenizer.save_pretrained(a__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 208 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        _UpperCAmelCase = images_eff[2, 0, 256, 10:17, 1]  # slice must come from the memory-efficient run, not the first `images`
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
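
# The data-parallel inference pattern that the tests above repeat, distilled
# into one place (a sketch, not part of the test suite; it assumes a host with
# several accelerator devices and the un-mangled variable names):
#
#     params = replicate(params)                                   # copy weights to every device
#     rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#     prompt_ids = pipeline.prepare_inputs([prompt] * jax.device_count())
#     prompt_ids = shard(prompt_ids)                               # split the batch across devices
#     images = pipeline(prompt_ids, params, rng, jit=True).images  # pmapped under the hood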
| 329 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
A : List[Any] = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
A : Optional[Any] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
A : Tuple = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
A : List[str] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 6 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ :int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '''
'Note also that one single text can be provided for conditional image to text generation.' )
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs['input_ids'] )
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
_UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
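
# Minimal usage sketch for the image-to-text pipeline class above (illustrative
# only; the checkpoint id and image URL are assumptions, any image-to-text
# model works):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#     # -> [{'generated_text': '...caption...'}]
#
# A conditional prompt can also be passed, e.g. `captioner(image, prompt="a photo of")`,
# for the model types that support it (git, pix2struct) as handled in `preprocess` above.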
| 329 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
def A ( _lowerCamelCase ):
'''simple docstring'''
return (position - 1) // 2
def A ( _lowerCamelCase ):
'''simple docstring'''
return (2 * position) + 1
def A ( _lowerCamelCase ):
'''simple docstring'''
return (2 * position) + 2
class UpperCAmelCase_ ( Generic[T]):
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Any = []
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : List[str] = 0
def __len__( self):
'''simple docstring'''
return self.elements
def __repr__( self):
'''simple docstring'''
return str(self.heap)
def snake_case__ ( self):
'''simple docstring'''
return self.elements == 0
def snake_case__ ( self, __a, __a):
'''simple docstring'''
self.heap.append((elem, weight))
_lowerCAmelCase : Union[str, Any] = self.elements
self.elements += 1
self._bubble_up(_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0, self.elements - 1)
_lowerCAmelCase , _lowerCAmelCase : Dict = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.heap[0]
self._bubble_down(_SCREAMING_SNAKE_CASE)
return elem
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = self.position_map[elem]
_lowerCAmelCase : int = (elem, weight)
if position > 0:
_lowerCAmelCase : Optional[int] = get_parent_position(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_SCREAMING_SNAKE_CASE)
else:
self._bubble_down(_SCREAMING_SNAKE_CASE)
else:
self._bubble_down(_SCREAMING_SNAKE_CASE)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position_map[elem]
if curr_pos == 0:
return None
_lowerCAmelCase : Dict = get_parent_position(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.heap[curr_pos]
_lowerCAmelCase , _lowerCAmelCase : Dict = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
return self._bubble_up(_SCREAMING_SNAKE_CASE)
return None
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.position_map[elem]
_lowerCAmelCase , _lowerCAmelCase : str = self.heap[curr_pos]
_lowerCAmelCase : Dict = get_child_left_position(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : int = get_child_right_position(_SCREAMING_SNAKE_CASE)
if child_left_position < self.elements and child_right_position < self.elements:
_lowerCAmelCase , _lowerCAmelCase : Dict = self.heap[child_left_position]
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
return self._bubble_down(_SCREAMING_SNAKE_CASE)
if child_left_position < self.elements:
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
return self._bubble_down(_SCREAMING_SNAKE_CASE)
else:
return None
if child_right_position < self.elements:
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
return self._bubble_down(_SCREAMING_SNAKE_CASE)
return None
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.heap[nodea_pos][0]
_lowerCAmelCase : Any = self.heap[nodea_pos][0]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_lowerCAmelCase : int = nodea_pos
_lowerCAmelCase : int = nodea_pos
class UpperCAmelCase_ ( Generic[T]):
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = 0
def __repr__( self):
'''simple docstring'''
return str(self.connections)
def __len__( self):
'''simple docstring'''
return self.nodes
def snake_case__ ( self, __a):
'''simple docstring'''
if node not in self.connections:
_lowerCAmelCase : Dict = {}
self.nodes += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_node(_SCREAMING_SNAKE_CASE)
self.add_node(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[Any] = weight
_lowerCAmelCase : Union[str, Any] = weight
def A ( _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Any = {node: maxsize for node in graph.connections}
_lowerCAmelCase : Optional[Any] = {node: None for node in graph.connections}
_lowerCAmelCase : str = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_lowerCAmelCase : Union[str, Any] = priority_queue.extract_min()
_lowerCAmelCase : int = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCAmelCase : Union[str, Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_lowerCAmelCase : Dict = node
# running prim's algorithm
while not priority_queue.is_empty():
_lowerCAmelCase : List[Any] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowerCAmelCase : List[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_lowerCAmelCase : Optional[Any] = node
return dist, parent
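
# Usage sketch (assuming the original, un-mangled names: in this copy every
# module-level helper was renamed to `A`, both classes to `UpperCAmelCase_`,
# and all methods to `snake_case__`, so later definitions shadow earlier ones —
# restore distinct names such as `get_parent_position`, `MinPriorityQueue`,
# `GraphUndirectedWeighted` and `prims_algo` before running):
#
#     graph = GraphUndirectedWeighted()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("a", "c", 15)
#     dist, parent = prims_algo(graph)
#     # `parent` maps each node to its predecessor in the minimum spanning tree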
| 36 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]:
'''simple docstring'''
from .. import __version__
_UpperCAmelCase = take_from
_UpperCAmelCase = ()
if not isinstance(args[0] , a__ ):
_UpperCAmelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
_UpperCAmelCase = None
if isinstance(a__ , a__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(a__ ),)
_UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(a__ , a__ ):
values += (getattr(a__ , a__ ),)
_UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
_UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
_UpperCAmelCase = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , a__ , stacklevel=a__ )
if isinstance(a__ , a__ ) and len(a__ ) > 0:
_UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCAmelCase = call_frame.filename
_UpperCAmelCase = call_frame.lineno
_UpperCAmelCase = call_frame.function
_UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(a__ ) == 0:
return
elif len(a__ ) == 1:
return values[0]
return values
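
# How the helper above is typically invoked inside diffusers (a sketch; it was
# renamed `lowerCAmelCase__` here but corresponds to `deprecate`, and the
# argument names below are illustrative):
#
#     def __call__(self, *args, guidance_scale: float = 7.5, **kwargs):
#         # warn once and pop the value out of kwargs if a caller still uses the old name
#         scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)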
| 329 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__SCREAMING_SNAKE_CASE = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = vqa_pipeline(_SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}],
[{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__SCREAMING_SNAKE_CASE = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__SCREAMING_SNAKE_CASE = """How many cats are there?"""
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=_SCREAMING_SNAKE_CASE , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}, {"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}, {"""score""": ANY(_SCREAMING_SNAKE_CASE ), """answer""": ANY(_SCREAMING_SNAKE_CASE )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
__SCREAMING_SNAKE_CASE = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__SCREAMING_SNAKE_CASE = """How many cats are there?"""
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
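
# The behaviour under test, outside the harness (a sketch; the model id and the
# expected scores are taken from the slow test above):
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#         question="How many cats are there?", top_k=2)
#     # -> [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]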
| 267 |
import math
lowerCAmelCase__ :Optional[int] = 1_0
lowerCAmelCase__ :Optional[Any] = 7
lowerCAmelCase__ :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( a__: int = 2_0 ) -> str:
'''simple docstring'''
_UpperCAmelCase = math.comb(a__ , a__ )
_UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a__ )
_UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
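
# Why the closed form above works (linearity of expectation): each of the 7
# colours is missing from a draw of 20 balls exactly when all 20 come from the
# other 60 balls, i.e. with probability C(60, 20) / C(70, 20), so the expected
# number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)).
# Quick Monte Carlo cross-check (a sketch, not part of the original solution;
# it is self-contained because the module constants above were all renamed to
# `lowerCAmelCase__`, leaving BALLS_PER_COLOUR/NUM_COLOURS undefined):
def _monte_carlo_estimate(trials: int = 10_000) -> float:
    import random

    balls = [colour for colour in range(7) for _ in range(10)]  # 7 colours x 10 balls each
    total = 0
    for _ in range(trials):
        total += len(set(random.sample(balls, 20)))  # distinct colours in one draw of 20
    return total / trials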
| 329 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A_ :
lowerCAmelCase__ = XGLMConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = 'gelu'
def __init__(self :Optional[int] , _UpperCamelCase :List[str] , _UpperCamelCase :List[str]=14 , _UpperCamelCase :Optional[int]=7 , _UpperCamelCase :int=True , _UpperCamelCase :Union[str, Any]=True , _UpperCamelCase :List[Any]=True , _UpperCamelCase :Optional[Any]=99 , _UpperCamelCase :str=32 , _UpperCamelCase :Tuple=2 , _UpperCamelCase :List[str]=4 , _UpperCamelCase :int=37 , _UpperCamelCase :Union[str, Any]="gelu" , _UpperCamelCase :Any=0.1 , _UpperCamelCase :List[str]=0.1 , _UpperCamelCase :Dict=512 , _UpperCamelCase :Any=0.0_2 , )-> Dict:
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_labels
__A = vocab_size
__A = d_model
__A = num_hidden_layers
__A = num_attention_heads
__A = ffn_dim
__A = activation_function
__A = activation_dropout
__A = attention_dropout
__A = max_position_embeddings
__A = initializer_range
__A = None
__A = 0
__A = 2
__A = 1
def _lowerCAmelCase (self :List[Any] )-> Tuple:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowerCAmelCase (self :Any )-> Optional[Any]:
__A = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = self.get_config()
__A = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase (self :List[str] )-> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_SCREAMING_SNAKE_CASE , )
def _lowerCAmelCase (self :Union[str, Any] )-> Dict:
__A = self.prepare_config_and_inputs()
        # originally: (config, input_ids, input_mask, head_mask) = config_and_inputs
        (__A, __A, __A, __A) = config_and_inputs
__A = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class A_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowerCAmelCase (self :Tuple )-> List[str]:
__A = TFXGLMModelTester(self )
__A = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 )
def _lowerCAmelCase (self :Union[str, Any] )-> Optional[int]:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase (self :Optional[Any] )-> Optional[int]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFXGLMModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowerCAmelCase (self :Union[str, Any] )-> Optional[Any]:
super().test_resize_token_embeddings()
@require_tf
class A_ ( unittest.TestCase ):
@slow
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :Tuple=True )-> Dict:
__A = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__A = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__A = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__A = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase (self :int )-> Any:
__A = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__A = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
__A = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
__A = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
__A = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , seed=[7, 0] )
__A = tokenizer.decode(output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__A = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase (self :int )-> Optional[int]:
__A = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__A = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__A = '''left'''
# use different length sentences to test batching
__A = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
__A = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=_SCREAMING_SNAKE_CASE )
__A = inputs['''input_ids''']
__A = model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
__A = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__A = model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_new_tokens=12 )
__A = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__A = model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_new_tokens=12 )
__A = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__A = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__A = tokenizer.decode(output_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__A = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case ( *UpperCAmelCase )-> Optional[Any]:
"""simple docstring"""
if not isinstance(a__ , a__ ):
__A = list(a__ )
for i in range(len(a__ ) ):
__A = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case ( UpperCAmelCase )-> bool:
"""simple docstring"""
__A = [
'CUDA out of memory.', # CUDA OOM
'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
]
if isinstance(a__ , a__ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case ( UpperCAmelCase = None , UpperCAmelCase = 1_2_8 )-> int:
"""simple docstring"""
if function is None:
return functools.partial(a__ , starting_batch_size=a__ )
__A = starting_batch_size
def decorator(*UpperCAmelCase , **UpperCAmelCase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__A = list(inspect.signature(a__ ).parameters.keys() )
# Guard against user error
if len(a__ ) < (len(a__ ) + 1):
__A = ', '.join([f'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'Batch size was passed into `{function.__name__}` as the first argument when called.'
f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
while True:
if batch_size == 0:
raise RuntimeError('No executable batch size found, reached zero.' )
try:
return function(a__ , *a__ , **a__ )
except Exception as e:
if should_reduce_batch_size(a__ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
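
# Typical use of the decorator defined last above (it mirrors accelerate's
# documented `find_executable_batch_size` pattern; note that in this copy all
# three helpers were renamed to `snake_case`, so only the last definition
# survives and its call to `should_reduce_batch_size` would fail — restore the
# original names before running).  The training body is a placeholder:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_function(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     training_function()  # retries with a halved batch size after each OOM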
| 161 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoConfig.from_pretrained(a__ )
_UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ )
_UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ )
_UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_UpperCAmelCase = F'''layers_{str(a__ )}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = txa_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale']
_UpperCAmelCase = txa_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
_UpperCAmelCase = tax_model['target']['token_embedder']['embedding']
_UpperCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a__ )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
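
# Example invocation (a sketch; the paths are placeholders and, as elsewhere in
# this copy, the `t5x` package name was mangled to `tax`, including the
# `args.tax_checkpoint_path` attribute that argparse would actually expose as
# `args.t5x_checkpoint_path`):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#         --config_name google/long-t5-local-base \
#         --flax_dump_folder_path ./long-t5-flax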
| 329 | 0 |
import math
import qiskit
def lowerCamelCase__ ( _A = 1 , _A = 1 , _A = 1 ):
'''simple docstring'''
if (
isinstance(a__ , a__ )
or isinstance(a__ , a__ )
or isinstance(a__ , a__ )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(a__ ) != input_a)
or (math.floor(a__ ) != input_a)
or (math.floor(a__ ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
snake_case_ = qiskit.QuantumRegister(4 , "qr" )
snake_case_ = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
snake_case_ = [input_a, input_a, carry_in]
snake_case_ = qiskit.QuantumCircuit(a__ , a__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(a__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(a__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(a__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , a__ ) # measure the last two qbits
snake_case_ = qiskit.Aer.get_backend("aer_simulator" )
snake_case_ = qiskit.execute(a__ , a__ , shots=1000 )
return job.result().get_counts(a__ )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
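
# Classical behaviour realised by the circuit for definite inputs (an input of
# 2 applies a Hadamard instead, so the measured counts then spread over several
# of these rows):
#
#     input_1  input_2  carry_in  ->  sum  carry_out
#        0        0        0           0       0
#        1        0        0           1       0
#        0        1        1           0       1
#        1        1        1           1       1
#
# After the gate sequence, qubit 2 holds the sum and qubit 3 the carry, which
# is why `measure([2, 3], cr)` reads exactly those two wires.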
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : str = 'ctrl'
_a : Tuple = ['past_key_values']
_a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=246534 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=48 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = dff
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
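    """Memoised recursive factorial.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """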
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : str = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
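# The two slow tests above exercise the COCO-style annotation path of the image
# processor. A minimal sketch of that input contract (illustrative values only,
# not the contents of the fixture files):
#
#     from PIL import Image
#     from transformers import DeformableDetrImageProcessor
#
#     image = Image.new("RGB", (640, 480))
#     target = {
#         "image_id": 1,
#         "annotations": [
#             # COCO bbox format is [x, y, width, height]
#             {"bbox": [100.0, 120.0, 50.0, 40.0], "category_id": 17,
#              "area": 2000.0, "iscrowd": 0},
#         ],
#     }
#     encoding = DeformableDetrImageProcessor()(
#         images=image, annotations=target, return_tensors="pt")
#     # encoding["labels"][0] then holds normalized "boxes", "class_labels", etc.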
| 329 | 0 |
def a( A : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
a = []
for data in source_data:
for i, el in enumerate(a__ ):
if len(a__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a__ ) )
return data_lists
def a( A : list[list[float]] , A : list[int] ) -> list[list[float]]:
"""simple docstring"""
a = []
for dlist, weight in zip(a__ , a__ ):
a = min(a__ )
a = max(a__ )
a = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
a = f'''Invalid weight of {weight:f} provided'''
raise ValueError(a__ )
score_lists.append(a__ )
return score_lists
def a( A : list[list[float]] ) -> list[float]:
"""simple docstring"""
a = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a__ ):
a = final_scores[j] + ele
return final_scores
def a( A : list[list[float]] , A : list[int] ) -> list[list[float]]:
"""simple docstring"""
a = get_data(a__ )
a = calculate_each_score(a__ , a__ )
a = generate_final_scores(a__ )
# append scores to source data
for i, ele in enumerate(a__ ):
source_data[i].append(a__ )
return source_data
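if __name__ == "__main__":
    # Standalone demo of the min-max scoring implemented above (a sketch rather
    # than a call into the definitions, which all share the name `a`): weight 1
    # rewards larger values, weight 0 rewards smaller ones via 1 - normalized.
    rows = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]  # e.g. [price, mileage]
    weights = [0, 0]  # cheaper and lower-mileage rows should score higher
    totals = [0.0] * len(rows)
    for col, w in zip(zip(*rows), weights):
        lo, hi = min(col), max(col)
        for i, v in enumerate(col):
            if hi == lo:
                totals[i] += 1.0 if w == 0 else 0.0
            else:
                norm = (v - lo) / (hi - lo)
                totals[i] += norm if w == 1 else 1.0 - norm
    print(totals)  # one aggregate score per row; higher is better here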
| 227 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
_a : List[str] = JukeboxTokenizer
_a : List[Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def a__ ( lowercase : np.ndarray, lowercase : tuple[int, int], lowercase : tuple[int, int], lowercase : bool, ) -> tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = grid.shape
_UpperCamelCase = [-1, 1, 0, 0]
_UpperCamelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_UpperCamelCase , _UpperCamelCase = [(0, source)], set()
_UpperCamelCase = np.full((rows, cols), np.inf )
_UpperCamelCase = 0
_UpperCamelCase = np.empty((rows, cols), dtype=a__ )
_UpperCamelCase = None
while queue:
((_UpperCamelCase) , (_UpperCamelCase)) = heappop(a__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_UpperCamelCase = []
while (x, y) != source:
path.append((x, y) )
_UpperCamelCase , _UpperCamelCase = predecessors[x, y]
path.append(a__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a__ ) ):
_UpperCamelCase , _UpperCamelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_UpperCamelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a__, (dist + 1, (nx, ny)) )
_UpperCamelCase = dist + 1
_UpperCamelCase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
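# Usage sketch (a hypothetical call, since the parameter names above are
# obfuscated): cells equal to 1 are walkable, and the search returns
# (distance, path), or (np.inf, []) when the destination is unreachable.
#
#     grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#     dist, path = grid_shortest_path(grid, (0, 0), (2, 2), False)
#     # dist == 4; path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]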
| 324 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
_UpperCAmelCase = parser.parse_args()
return args
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
def fn(a__: str ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase__ ( a__: List[str] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(tokenized_data['input_ids'] ) ):
_UpperCAmelCase = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_UpperCAmelCase = tf.train.Features(feature=a__ )
_UpperCAmelCase = tf.train.Example(features=a__ )
_UpperCAmelCase = example.SerializeToString()
records.append(a__ )
return records
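# Round-trip sketch for the serialization above (assuming the standard
# tf.train proto API; the feature names mirror the dict built in
# get_serialized_examples and the values are made up):
#
#     feature = {
#         "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 2, 3])),
#         "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 1, 1])),
#     }
#     record = tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
#     parsed = tf.train.Example.FromString(record)  # recovers the original features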
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_UpperCAmelCase = min(len(a__ ) , args.limit )
_UpperCAmelCase = dataset.select(range(a__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_UpperCAmelCase = tokenize_function(a__ )
_UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size (args.max_length here). To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a__: Optional[int] ):
# Concatenate all texts.
_UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
_UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_UpperCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_UpperCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for shard in range(0 , len(a__ ) , args.shard_size ):
_UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size]
_UpperCAmelCase = len(dataset_snapshot['input_ids'] )
_UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_UpperCAmelCase = get_serialized_examples(a__ )
with tf.io.TFRecordWriter(a__ ) as out_file:
for i in range(len(a__ ) ):
_UpperCAmelCase = serialized_examples[i]
out_file.write(a__ )
print('Wrote file {} containing {} records'.format(a__ , a__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = parse_args()
main(args)
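# Standalone worked example of the concatenate-then-chunk logic in group_texts
# (pure Python; a toy max_length of 4):
#
#     examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
#     concatenated = {k: sum(v, []) for k, v in examples.items()}  # 9 tokens total
#     total_length = (9 // 4) * 4                                  # drop remainder -> 8
#     chunks = [concatenated["input_ids"][i : i + 4] for i in range(0, total_length, 4)]
#     # chunks == [[1, 2, 3, 4], [5, 6, 7, 8]]; the trailing token 9 is dropped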
| 329 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , a__ , )
if isinstance(a__ , torch.Tensor ):
return image
elif isinstance(a__ , PIL.Image.Image ):
__A : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
__A , __A : Any = image[0].size
__A , __A : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__A : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__A : int = np.concatenate(a__ , axis=0 )
__A : Optional[Any] = np.array(a__ ).astype(np.floataa ) / 255.0
__A : Optional[Any] = image.transpose(0 , 3 , 1 , 2 )
__A : Any = 2.0 * image - 1.0
__A : Union[str, Any] = torch.from_numpy(a__ )
elif isinstance(image[0] , torch.Tensor ):
__A : Optional[Any] = torch.cat(a__ , dim=0 )
return image
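# Worked example of the normalization above: a pixel value p in [0, 255] is
# rescaled to p / 255 and then mapped to 2.0 * (p / 255) - 1.0, so
# 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0; the `x - x % 8` step first rounds
# the spatial dimensions down to a multiple of 8.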
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if isinstance(a__ , torch.Tensor ):
return mask
elif isinstance(a__ , PIL.Image.Image ):
__A : Union[str, Any] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__A , __A : int = mask[0].size
__A , __A : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__A : Dict = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
__A : List[str] = np.concatenate(a__ , axis=0 )
__A : Dict = mask.astype(np.floataa ) / 255.0
__A : Dict = 0
__A : Any = 1
__A : str = torch.from_numpy(a__ )
elif isinstance(mask[0] , torch.Tensor ):
__A : Tuple = torch.cat(a__ , dim=0 )
return mask
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : UNetaDModel
UpperCamelCase : RePaintScheduler
def __init__( self , _A , _A ):
super().__init__()
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , _A , _A , _A = 250 , _A = 0.0 , _A = 10 , _A = 10 , _A = None , _A = "pil" , _A = True , ):
__A : int = image
__A : int = _preprocess_image(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = original_image.to(device=self.device , dtype=self.unet.dtype )
__A : Dict = _preprocess_mask(_SCREAMING_SNAKE_CASE )
__A : str = mask_image.to(device=self.device , dtype=self.unet.dtype )
__A : List[str] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__A : List[str] = original_image.shape
__A : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
__A : List[str] = eta
__A : Union[str, Any] = self.scheduler.timesteps[0] + 1
__A : Optional[Any] = generator[0] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__A : List[str] = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__A : List[str] = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__A : Any = self.scheduler.undo_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : str = t
__A : str = (image / 2 + 0.5).clamp(0 , 1 )
__A : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__A : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
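# Minimal usage sketch (the checkpoint id and step counts are illustrative,
# not prescribed by this file; jump_length / jump_n_sample control the
# resampling schedule used to harmonize the inpainted region):
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     result = pipe(image=original_image, mask_image=mask, num_inference_steps=250,
#                   jump_length=10, jump_n_sample=10)
#     result.images[0].save("inpainted.png")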
| 280 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for _ in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = []
for step in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(a__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , a__ )
_UpperCAmelCase = torch.load(a__ )
scheduler.load_state_dict(a__ )
return lrs
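# The helper above checkpoints a scheduler mid-run; the core pattern under test
# is simply:
#
#     torch.save(scheduler.state_dict(), "schedule.bin")     # at some step
#     scheduler.load_state_dict(torch.load("schedule.bin"))  # resume; LRs must match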
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __a ( unittest.TestCase ):
_a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None
_a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_a : List[Any] = 10
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListAlmostEqual(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' )
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = fn
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def a_ ( _lowerCAmelCase ) -> List[Tuple[int, ...]]:
__lowerCamelCase : int = []
if isinstance(a__ ,a__ ):
for v in tree.values():
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Tuple[int, ...]:
__lowerCamelCase : Any = []
for d in reversed(a__ ):
idx.append(flat_idx % d )
__lowerCamelCase : Dict = flat_idx // d
return tuple(reversed(a__ ) )
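# Worked example for the conversion above (referring to it by its apparent role,
# flat index -> multi-index): with dims = (2, 3), a flat index of 4 maps to
# (1, 1), because 4 % 3 == 1 gives the last dimension and 4 // 3 == 1 the first:
#
#     >>> flat_idx_to_idx(4, (2, 3))
#     (1, 1)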
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,) -> List[Tuple[slice, ...]]:
def reduce_edge_list(_lowerCAmelCase ) -> None:
__lowerCamelCase : Optional[int] = True
for i in range(len(a__ ) ):
__lowerCamelCase : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
__lowerCamelCase : Optional[int] = l[reversed_idx]
if start_edges is None:
__lowerCamelCase : List[Any] = [s == 0 for s in start]
reduce_edge_list(a__ )
if end_edges is None:
__lowerCamelCase : Optional[int] = [e == (d - 1) for e, d in zip(a__ ,a__ )]
reduce_edge_list(a__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(a__ ) == 0:
return [()]
elif len(a__ ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[Any] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(a__ ,a__ ):
if s == e:
path_list.append(slice(a__ ,s + 1 ) )
else:
break
__lowerCamelCase : Optional[Any] = tuple(a__ )
__lowerCamelCase : Optional[int] = len(a__ )
# start == end, and we're done
if divergence_idx == len(a__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCamelCase : List[str] = start[divergence_idx]
return tuple(
path + (slice(a__ ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCamelCase : List[Any] = end[divergence_idx]
return tuple(
path + (slice(a__ ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__lowerCamelCase : Tuple = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> torch.Tensor:
__lowerCamelCase : Union[str, Any] = t.shape[:no_batch_dims]
__lowerCamelCase : int = list(_flat_idx_to_idx(a__ ,a__ ) )
# _get_minimal_slice_set is inclusive
__lowerCamelCase : Dict = list(_flat_idx_to_idx(flat_end - 1 ,a__ ) )
# Get an ordered list of slices to perform
__lowerCamelCase : Union[str, Any] = _get_minimal_slice_set(
a__ ,a__ ,a__ ,)
__lowerCamelCase : Dict = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,) -> Any:
if not (len(a__ ) > 0):
raise ValueError('Must provide at least one input' )
__lowerCamelCase : int = [shape[:no_batch_dims] for shape in _fetch_dims(a__ )]
__lowerCamelCase : int = tuple([max(a__ ) for s in zip(*a__ )] )
def _prep_inputs(_lowerCAmelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__lowerCamelCase : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__lowerCamelCase : str = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
__lowerCamelCase : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__lowerCamelCase : Any = tensor_tree_map(_prep_inputs ,a__ )
__lowerCamelCase : List[Any] = None
if _out is not None:
__lowerCamelCase : List[Any] = tensor_tree_map(lambda _lowerCAmelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
__lowerCamelCase : Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__lowerCamelCase : str = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_lowerCAmelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = prepped_outputs
for _ in range(a__ ):
# Chunk the input
if not low_mem:
__lowerCamelCase : Dict = _select_chunk
else:
__lowerCamelCase : Dict = partial(
_chunk_slice ,flat_start=a__ ,flat_end=min(a__ ,i + chunk_size ) ,no_batch_dims=len(a__ ) ,)
__lowerCamelCase : Union[str, Any] = tensor_tree_map(a__ ,a__ )
# Run the layer on the chunk
__lowerCamelCase : Optional[Any] = layer(**a__ )
# Allocate space for the output
if out is None:
__lowerCamelCase : Union[str, Any] = tensor_tree_map(lambda _lowerCAmelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,a__ )
# Put the chunk in its pre-allocated space
if isinstance(a__ ,a__ ):
def assign(_lowerCAmelCase ,_lowerCAmelCase ) -> None:
for k, v in da.items():
if isinstance(a__ ,a__ ):
assign(a__ ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__lowerCamelCase : Union[str, Any] = da[k]
assign(a__ ,a__ )
elif isinstance(a__ ,a__ ):
for xa, xa in zip(a__ ,a__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__lowerCamelCase : Dict = xa
elif isinstance(a__ ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__lowerCamelCase : Tuple = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
__lowerCamelCase : List[Any] = tensor_tree_map(lambda _lowerCAmelCase : t.view(orig_batch_dims + t.shape[1:] ) ,a__ )
return out
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _a : Dict = 512 , ) -> Union[str, Any]:
__lowerCamelCase : str = max_chunk_size
__lowerCamelCase : List[Any] = None
__lowerCamelCase : Optional[Any] = None
def _lowercase ( self : Optional[int] , _a : List[str] , _a : Dict , _a : List[Any] ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__lowerCamelCase : Tuple = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__lowerCamelCase : int = [c for c in candidates if c > min_chunk_size]
__lowerCamelCase : List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_a : Dict ) -> bool:
try:
with torch.no_grad():
fn(*_SCREAMING_SNAKE_CASE , chunk_size=_SCREAMING_SNAKE_CASE )
return True
except RuntimeError:
return False
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE ) - 1
while i > min_viable_chunk_size_index:
__lowerCamelCase : str = test_chunk_size(candidates[i] )
if not viable:
__lowerCamelCase : Tuple = (min_viable_chunk_size_index + i) // 2
else:
__lowerCamelCase : Tuple = i
__lowerCamelCase : Optional[Any] = (i + len(_SCREAMING_SNAKE_CASE ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase ( self : Dict , _a : Tuple , _a : Dict ) -> bool:
__lowerCamelCase : Optional[int] = True
for aa, aa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert type(_SCREAMING_SNAKE_CASE ) == type(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Tuple = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )]
__lowerCamelCase : List[Any] = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )]
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
consistent &= aa == aa
return consistent
def _lowercase ( self : int , _a : Tuple , _a : List[str] , _a : str , ) -> int:
__lowerCamelCase : int = True
__lowerCamelCase : List[Any] = tree_map(lambda _a : a.shape if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) else a , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = self._compare_arg_caches(self.cached_arg_data , _SCREAMING_SNAKE_CASE )
else:
            # No cached value yet, so force a tuning pass below
__lowerCamelCase : Any = False
if not consistent:
__lowerCamelCase : Dict = self._determine_favorable_chunk_size(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
__lowerCamelCase : Optional[int] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
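# Minimal usage sketch for the chunked application helper above (conventionally
# named chunk_layer: it flattens the leading batch dims, runs `layer` on slices
# of at most chunk_size flattened rows, and stitches the outputs back together):
#
#     import torch
#
#     def layer(x):
#         return {"out": x * 2}
#
#     inputs = {"x": torch.randn(8, 16, 4)}  # two batch dims: 8 x 16
#     out = chunk_layer(layer, inputs, chunk_size=32, no_batch_dims=2)
#     # out["out"].shape == (8, 16, 4), computed 32 flattened rows at a time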
| 208 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = original_name.split('.' )[0]
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] )
_UpperCAmelCase = orig_block_num - offset
_UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def lowerCAmelCase__ ( a__: Tuple ) -> int:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
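# Worked example of the renaming above: a checkpoint key such as
# "network.2.0.mlp.fc1.weight" first becomes
# "poolformer.encoder.2.0.mlp.fc1.weight", and replace_key_with_offset then
# rewrites it to "poolformer.encoder.block.<2 - offset>.0.output.conv1.weight",
# where the offset counts the embedding projections seen so far.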
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[Any] = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __A( a ):
snake_case_ = 'deit'
def __init__( self , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=16 , **_snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = encoder_stride
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
        return 1E-4
| 6 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = process
_UpperCAmelCase = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dataset[i]
_UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params )
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = loader
_UpperCAmelCase = infer
_UpperCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase = None
_UpperCAmelCase = loader_batch_size
# Internal bookkeeping
_UpperCAmelCase = None
_UpperCAmelCase = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_UpperCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase = next(self.iterator )
_UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase = processed
_UpperCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
class __a ( UpperCAmelCase ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = keya
        _UpperCAmelCase = keyb
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 329 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=7, __a=3, __a=30, __a=400, __a=True, __a=None, __a=True, __a=[0.5, 0.5, 0.5], __a=[0.5, 0.5, 0.5], __a=True, __a=1 / 255, __a=True, ):
'''simple docstring'''
_lowerCAmelCase : str = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
_lowerCAmelCase : str = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Union[str, Any] = min_resolution
_lowerCAmelCase : Optional[int] = max_resolution
_lowerCAmelCase : Tuple = do_resize
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : List[Any] = do_normalize
_lowerCAmelCase : Optional[Any] = image_mean
_lowerCAmelCase : Optional[int] = image_std
_lowerCAmelCase : Tuple = do_rescale
_lowerCAmelCase : str = rescale_factor
_lowerCAmelCase : Tuple = do_pad
def snake_case__ ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the image processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
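    # Worked example: a 640x480 (w x h) image with shortest_edge=18 resizes to
    # height 18 and width int(18 * 640 / 480) = 24; the batched branch takes the
    # max height/width over the batch, i.e. the padded shape all images share.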
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case__ ( self):
'''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
def snake_case__ ( self):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def snake_case__ ( self):
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
def snake_case__ ( self):
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
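# For the default task the `inputs` property above yields:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# Both axes are marked dynamic, so the exported ONNX graph accepts any batch size
# and sequence length.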
| 329 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def UpperCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""" )
        input_processor = processor(images=image_input, return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
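# Note: post_process_masks rescales the low-resolution masks (5x5 above) back to each
# image's original size, which is why the expected shape is (1, 3, 1764, 2646)
# regardless of the input mask resolution; malformed inputs raise an error instead.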
@require_vision
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""" )
        input_processor = processor(images=image_input, return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_tf
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="""tf""" )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="""tf""", )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="""tf""" )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="""tf""" )
@require_vision
@require_torchvision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="""tf""" )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="""pt""" )["""pixel_values"""].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="""pt""" )["""pixel_values"""].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="""tf""" )["""pixel_values"""].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 267 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
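    # With the tester defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
    # embed_dim=16): expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 16 and
    # expected_dim = 16 * 2 ** 2 = 64, i.e. a (batch_size, 16, 64) last_hidden_state.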
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
    def UpperCAmelCase__ ( self ) -> int:
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """simple docstring"""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN != NaN, so this zeroes out NaN entries
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                                'Tuple and dict output are not equal. Difference:'
                                f''' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'''
                                f''' {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has'''
                                f''' `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.'''
                            ), )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def UpperCAmelCase__ ( self ) -> Dict:
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 329 | 0 |
alphabet_size : int = 256
# Modulus to hash a string
modulus : int = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    '''simple docstring'''
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern, text)
    pattern = '''Lue'''
    assert not rabin_karp(pattern, text)
    print('''Success.''')
if __name__ == "__main__":
test_rabin_karp()
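# Example: rabin_karp('''abc''', '''xxabcxx''') is True, rabin_karp('''abc''', '''xxabx''') is False.
# The rolling hash updates each window in O(1), so the scan runs in O(t_len + p_len)
# on average instead of re-hashing every window from scratch.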
| 117 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
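# Sanity check: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits; solution(1_000) answers Project Euler problem 25.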
| 329 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
def lowercase_ ( self :Any ) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
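    # The toy vocab/merges above form a minimal byte-level BPE: '\u0120' is the
    # GPT-2/RoBERTa marker for a leading space, so '\u0120low' stands for " low" and
    # the merge rules assemble 'low', 'er', etc. from single characters.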
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def lowercase_ ( self :Dict ) -> int:
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def lowercase_ ( self :Tuple ) -> int:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=True), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=True), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def lowercase_ ( self :List[Any] ) -> List[str]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('roberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
def lowercase_ ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
def lowercase_ ( self :Dict ) -> str:
'''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
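if __name__ == "__main__":
    # Hedged usage sketch of the processor under test (requires the torch and
    # vision extras); it is built directly with the defaults exercised above,
    # so no checkpoint download is involved.
    demo_image = Image.fromarray(np.random.randint(0, 255, (64, 48, 3), dtype=np.uint8))
    demo_processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
    print(demo_processor(demo_image, return_tensors="pt").pixel_values.shape)  # expected: torch.Size([1, 3, 30, 30])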
| 329 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    Wrapper around a learning-rate scheduler so that it only steps when the wrapped
    optimizer(s) actually performed a step: the scheduler is skipped while gradients
    are still being accumulated or when the optimizer step was skipped (e.g. because
    of inf/NaN gradients under mixed precision).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
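if __name__ == "__main__":
    # Hedged sketch of how this wrapper is normally obtained: Accelerator.prepare
    # wraps a vanilla PyTorch scheduler in an AcceleratedScheduler automatically.
    import torch
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    # scheduler.step() is now a no-op on iterations where the optimizer step was
    # skipped, e.g. while gradients are being accumulated.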
| 187 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
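if __name__ == "__main__":
    # Hedged usage sketch (requires the torch and vision extras): the processor
    # is built directly with the same height/width size convention tested above.
    demo_image = Image.fromarray(np.random.randint(0, 255, (40, 30, 3), dtype=np.uint8))
    demo_processor = DonutImageProcessor(size={"height": 18, "width": 20})
    print(demo_processor(demo_image, return_tensors="pt").pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])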
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    """Configuration for the REALM family of models (embedder, encoder, scorer, reader and open-QA)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
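if __name__ == "__main__":
    # Small sanity sketch: default construction plus a few of the REALM-specific
    # fields defined above.
    config = RealmConfig()
    print(config.retriever_proj_size, config.num_candidates, config.reader_beam_size)  # 128 8 5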
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
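if __name__ == "__main__":
    # Sketch of the attribute_map above in action: the generic names resolve to
    # the GPT-specific ones, so `hidden_size` transparently reads `n_embd`.
    config = OpenAIGPTConfig(n_embd=512, n_layer=6)
    print(config.hidden_size, config.num_hidden_layers)  # 512 6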
| 329 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path ("a.b.c") to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
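# Quick self-check of the three pattern forms should_ignore() handles (a trailing
# ".*", an infix ".*.", and a plain substring); the example keys are made up.
assert should_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"])
assert should_ignore("encoder.layers.3.norm_k.bias", ["encoder.layers.*.norm_k.bias"])
assert not should_ignore("decoder.layers.0.fc1.weight", ["encoder.proj"])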
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
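    # Example invocation of the converter above (all paths and the script file
    # name are hypothetical):
    #   python convert_speecht5_checkpoint.py \
    #       --task t2s \
    #       --checkpoint_path ./speecht5_tts.pt \
    #       --vocab_path ./spm_char.model \
    #       --pytorch_dump_folder_path ./speecht5_tts_hf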
| 330 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
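# Worked example of the rename above (the TF-style key is made up for illustration):
#   rename_state_dict_key("encoder/layer_0/ffn/dense_1/kernel")
#   -> "encoder.layers.0.fc2.weight"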
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
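    # Example invocation (paths are hypothetical):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
    # The embed_positions weights are popped before saving above because Pegasus uses
    # static sinusoidal position embeddings that are regenerated when the model loads.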
| 330 | 1 |