| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
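
A minimal usage sketch for the tokenizer above (editor's addition, not part of the original module; the vocab path is a placeholder you must supply):

tokenizer = CamembertTokenizer(vocab_file="./sentencepiece.bpe.model")
tokens = tokenizer._tokenize("J'aime le camembert")
ids = [tokenizer._convert_token_to_id(t) for t in tokens]
print(tokenizer.convert_tokens_to_string(tokens), ids)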
---
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
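
A quick sanity check of the identities above (editor's addition; left commented out because it assumes `fuzz.fuzzy_or`/`fuzz.fuzzy_and` return `(universe, membership)` pairs, as the indexing in this script suggests):

    # assert np.allclose(union, np.maximum(young, middle_aged))
    # assert np.allclose(intersection, np.minimum(young, middle_aged))
    # assert np.allclose(alg_sum, 1 - (1 - young) * (1 - middle_aged))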
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
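
A brief usage sketch for the config class above (editor's addition; the values shown are just the defaults):

config = DonutSwinConfig()
print(config.num_layers)   # 4, derived from len(depths)
print(config.hidden_size)  # 768 == 96 * 2 ** 3, the channel dim after the last stage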
---
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        # Fallback so the module can still be imported when vision is unavailable.
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
---
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
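
An example invocation of the script above (editor's addition; the shortcut name and output path are placeholders):

# python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#     --pytorch_checkpoint_path bert-base-uncased --tf_dump_path ./tf_dumps \
#     --compare_with_pt_model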
---
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map each choice to its string representation so argparse can parse it back to the original value.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
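
A minimal usage sketch for the parser above (editor's addition; the dataclass and CLI values are illustrative):

from dataclasses import dataclass

@dataclass
class ExampleArguments:
    learning_rate: float = 3e-4
    use_fp16: bool = False

example_parser = HfArgumentParser(ExampleArguments)
(example_args,) = example_parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--use_fp16", "true"])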
---
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
---
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTV2Config,
MobileViTV2ForImageClassification,
MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    # Drop keys that have no counterpart in the HF model (e.g. the auxiliary segmentation head).
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_mobilevitv2_checkpoint(
    args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
---
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__magic_name__ = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__magic_name__ = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
__magic_name__ = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required to make the metric insensitive to casing and tokenization.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
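# A minimal usage sketch for the metric above, mirroring the docstring example;
# it assumes the `datasets` library can load this script as a metric.
#
#   >>> wiki_split = datasets.load_metric("wiki_split")
#   >>> wiki_split.compute(
#   ...     sources=["About 95 species are currently accepted ."],
#   ...     predictions=["About 95 you now get in ."],
#   ...     references=[["About 95 species are currently known ."]],
#   ... )
#   {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}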
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
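# A small worked example of `clean_model_doc_toc` (hypothetical toc entries):
# duplicated "local" keys with a single distinct title are merged, and the
# result is sorted alphabetically by title.
#
#   >>> clean_model_doc_toc(
#   ...     [
#   ...         {"local": "model_doc/bert", "title": "BERT"},
#   ...         {"local": "model_doc/bert", "title": "BERT"},
#   ...         {"local": "model_doc/albert", "title": "ALBERT"},
#   ...     ]
#   ... )
#   [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]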
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they contain the same characters with the same
    counts, ignoring case and whitespace.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding character slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
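# Note: an equivalent (and arguably more idiomatic) check could compare character
# counts directly with collections.Counter; shown only as an alternative sketch:
#
#   >>> from collections import Counter
#   >>> Counter("listen") == Counter("silent")
#   True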
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
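    # Note on the shape check above: with the default tester settings
    # (image_size=64, downsampling_rates=[1, 4, 8, 16], hidden_sizes=[16, 32, 64, 128]),
    # the last hidden state is expected to be 64 // (16 * 2) = 2 pixels per side,
    # i.e. a (batch_size, 128, 2, 2) tensor.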
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a number of bytes into the given number of partitions and return
    the byte ranges as strings. The duplicate parameter names in the original
    signature were a syntax error; the names below follow the body's usage."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
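# A quick worked example (assuming the `allocation_num` name used above):
#
#   >>> allocation_num(16, 4)
#   ['1-4', '5-8', '9-12', '13-16']
#
# The last partition always ends at `number_of_bytes`, so a remainder that does
# not divide evenly is absorbed by the final range, e.g. allocation_num(10, 3)
# gives ['1-3', '4-6', '7-10'].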
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
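# A minimal wiring sketch (hypothetical output path), assuming a LightningModule
# `model` whose validation loop logs `val_rouge2`; `Seq2SeqLoggingCallback` is
# defined just below:
#
#   checkpoint = get_checkpoint_callback("output/", metric="rouge2")
#   early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint, early_stop, Seq2SeqLoggingCallback()])
#   trainer.fit(model)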
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
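# A minimal invocation sketch (hypothetical file paths, assuming the `selftrain`
# name used above); extra training arguments flow through **kwargs:
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/unlabeled.csv",
#       output_dir="output/self-training",
#       evaluation_strategy="epoch",
#       eval_file="data/eval.csv",
#   )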
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """
        Extracts log-mel filterbank features for one waveform array (unbatched).
        """
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
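# A minimal usage sketch (hypothetical values, assuming the class name used above
# and 16 kHz mono audio):
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   # inputs["input_values"] holds one unpadded waveform of length 16000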
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the
        # loop won't run 32 times, it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
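# A quick worked example of Kernighan's trick: each `number &= number - 1`
# clears the lowest set bit, so the loop runs once per set bit.
#
#   25 = 0b11001  ->  24 = 0b11000  ->  16 = 0b10000  ->  0
#   get_set_bits_count(25)  # 3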
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
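# A typical invocation sketch (hypothetical script name and paths):
#
#   python convert_ldm_original.py \
#       --checkpoint_path model.ckpt \
#       --config_path config.yaml \
#       --output_path ./ldm-pipeline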
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
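# A quick worked example: only multi-character, all-Chinese tokens survive.
#
#   >>> get_chinese_word(["中国", "人", "hello"])
#   ['中国']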
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
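# A quick worked example: characters inside a known whole word get the "##"
# prefix, which is how whole-word-masking references are marked downstream.
#
#   >>> add_sub_symbol(["中", "国"], {"中国"})
#   ['中', '##国']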
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
_UpperCamelCase : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] ,tasks=["cws"] ).cws
_UpperCamelCase : int = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
_UpperCamelCase : Optional[Any] = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
_UpperCamelCase : Dict = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
_UpperCamelCase : List[str] = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
_UpperCamelCase : str = []
for id in input_ids:
_UpperCamelCase : int = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
_UpperCamelCase : Optional[int] = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
_UpperCamelCase : Optional[Any] = []
        # We only save the positions of Chinese subwords that start with ##, i.e. those that are part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
_UpperCamelCase : Any = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
_UpperCamelCase : Dict = f.readlines()
_UpperCamelCase : Any = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _UpperCamelCase : Any = LTP(args.ltp )  # faster on a GPU device
_UpperCamelCase : Optional[Any] = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase : Tuple = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
_UpperCamelCase : List[Any] = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
lowerCamelCase__ = parser.parse_args()
main(args)
| 624 |
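Because the obfuscated names above obscure the whole-word-masking merge, here is a de-obfuscated sketch of the same `add_sub_symbol` logic; it assumes the `is_chinese` helper defined in the snippet:
def add_sub_symbol(bert_tokens, chinese_word_set):
    # Inside each LTP-segmented Chinese word, prefix every BERT sub-token
    # after the first with "##" so whole-word masking can group them.
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = list(bert_tokens)
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            longest = min(end - start, max_word_len)
            for i in range(longest, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start += i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
# e.g. add_sub_symbol(["中", "国", "人"], {"中国"}) -> ["中", "##国", "人"]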
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def A ( __UpperCamelCase ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
A__ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
A__ = model.state_dict()
A__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> List[str]:
A__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
A__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
A__ = model
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 9 | 0 |
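The remapping above pairs the two state dicts by zipping their key lists, which only works because both models enumerate parameters in the same order; a minimal illustration of the trick:
from torch import nn
src = nn.Linear(4, 4)  # stands in for the original checkpoint's model
dst = nn.Linear(4, 4)  # stands in for the diffusers model
mapping = dict(zip(src.state_dict().keys(), dst.state_dict().keys()))
remapped = {mapping[k]: v for k, v in src.state_dict().items()}
dst.load_state_dict(remapped)  # succeeds only while the key orders line up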
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : str = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase_ )
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@dataclass(frozen=UpperCAmelCase_ )
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = 42
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Tuple=False , UpperCAmelCase : bool = False , ) -> Tuple:
lowerCamelCase__ : Optional[int] = hans_processors[task]()
lowerCamelCase__ : int = os.path.join(
_snake_case , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_snake_case ) , _snake_case , ) , )
lowerCamelCase__ : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : int = label_list[2], label_list[1]
lowerCamelCase__ : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : Optional[Any] = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowerCamelCase__ : str = torch.load(_snake_case )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowerCamelCase__ : Tuple = (
processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
)
logger.info('Training examples: %s' , len(_snake_case ) )
lowerCamelCase__ : List[Any] = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
logger.info('Saving features into cached file %s' , _snake_case )
torch.save(self.features , _snake_case )
def __len__( self : Dict ) -> Tuple:
return len(self.features )
def __getitem__( self : Any , UpperCAmelCase : Dict ) -> Tuple:
return self.features[i]
def A_ ( self : str ) -> str:
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase :
UpperCAmelCase__ = 42
def __init__( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : List[Any]=False , UpperCAmelCase : bool = False , ) -> int:
lowerCamelCase__ : List[Any] = hans_processors[task]()
lowerCamelCase__ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : Any = label_list[2], label_list[1]
lowerCamelCase__ : Dict = label_list
lowerCamelCase__ : Union[str, Any] = processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
lowerCamelCase__ : Optional[int] = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_snake_case )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ : Any = tf.data.Dataset.from_generator(
_snake_case , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : Tuple ) -> Optional[int]:
return self.dataset
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : str , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
return self.features[i]
def A_ ( self : Any ) -> Any:
return self.label_list
class lowerCAmelCase ( UpperCAmelCase_ ):
def A_ ( self : List[str] , UpperCAmelCase : int ) -> str:
return self._create_examples(self._read_tsv(os.path.join(_snake_case , 'heuristics_train_set.txt' ) ) , 'train' )
def A_ ( self : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
return self._create_examples(self._read_tsv(os.path.join(_snake_case , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def A_ ( self : Optional[Any] ) -> List[str]:
return ["contradiction", "entailment", "neutral"]
def A_ ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> Optional[int]:
lowerCamelCase__ : Tuple = []
for i, line in enumerate(_snake_case ):
if i == 0:
continue
lowerCamelCase__ : List[str] = '%s-%s' % (set_type, line[0])
lowerCamelCase__ : Tuple = line[5]
lowerCamelCase__ : int = line[6]
lowerCamelCase__ : int = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCamelCase__ : List[Any] = line[0]
examples.append(InputExample(guid=_snake_case , text_a=_snake_case , text_b=_snake_case , label=_snake_case , pairID=_snake_case ) )
return examples
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
lowerCamelCase__ : Optional[Any] = {label: i for i, label in enumerate(__UpperCamelCase )}
lowerCamelCase__ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(__UpperCamelCase ) , desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d' % (ex_index) )
lowerCamelCase__ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=__UpperCamelCase , max_length=__UpperCamelCase , padding='max_length' , truncation=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , )
lowerCamelCase__ : Optional[int] = label_map[example.label] if example.label in label_map else 0
lowerCamelCase__ : Optional[Any] = int(example.pairID )
features.append(InputFeatures(**__UpperCamelCase , label=__UpperCamelCase , pairID=__UpperCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
_UpperCAmelCase : List[str] = {
"""hans""": 3,
}
_UpperCAmelCase : List[str] = {
"""hans""": HansProcessor,
}
| 295 |
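The classes above mirror `utils_hans.py` from the transformers examples; a hedged usage sketch of the torch Dataset, where the de-obfuscated class name `HansDataset` and the data directory are assumptions:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
dataset = HansDataset(
    data_dir="./hans",   # assumed local copy of the HANS tsv files
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
    evaluate=True,       # loads heuristics_evaluation_set.txt
)
print(len(dataset), dataset.get_labels())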
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCAmelCase__ ( unittest.TestCase):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=1_8 , lowercase=3_0 , lowercase=4_0_0 , lowercase=True , lowercase=None , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.48_145_466, 0.4_578_275, 0.40_821_073] , lowercase=[0.26_862_954, 0.26_130_258, 0.27_577_711] , lowercase=True , ) -> Dict:
__UpperCamelCase = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__UpperCamelCase = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean
__UpperCamelCase = image_std
__UpperCamelCase = do_convert_rgb
def __lowerCamelCase ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __lowerCamelCase ( self , lowercase=False , lowercase=False , lowercase=False ) -> Union[str, Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__UpperCamelCase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
__UpperCamelCase = []
for i in range(self.batch_size ):
__UpperCamelCase , __UpperCamelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__UpperCamelCase = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
if torchify:
__UpperCamelCase = [torch.from_numpy(_snake_case ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=_snake_case )
@property
def __lowerCamelCase ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , """do_resize""" ) )
self.assertTrue(hasattr(_snake_case , """size""" ) )
self.assertTrue(hasattr(_snake_case , """do_center_crop""" ) )
self.assertTrue(hasattr(_snake_case , """center_crop""" ) )
self.assertTrue(hasattr(_snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(_snake_case , """image_mean""" ) )
self.assertTrue(hasattr(_snake_case , """image_std""" ) )
self.assertTrue(hasattr(_snake_case , """do_convert_rgb""" ) )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def __lowerCamelCase ( self ) -> Dict:
pass
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCamelCase = image_processing(_snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCamelCase = image_processing(_snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCamelCase = image_processing(_snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_snake_case )
__UpperCamelCase = 3
@property
def __lowerCamelCase ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , """do_resize""" ) )
self.assertTrue(hasattr(_snake_case , """size""" ) )
self.assertTrue(hasattr(_snake_case , """do_center_crop""" ) )
self.assertTrue(hasattr(_snake_case , """center_crop""" ) )
self.assertTrue(hasattr(_snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(_snake_case , """image_mean""" ) )
self.assertTrue(hasattr(_snake_case , """image_std""" ) )
self.assertTrue(hasattr(_snake_case , """do_convert_rgb""" ) )
def __lowerCamelCase ( self ) -> List[Any]:
pass
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCamelCase = image_processing(_snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 601 |
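For orientation, a short sketch of what the tests above exercise; the random input image and the tiny crop size are stand-ins:
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor
processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])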
from __future__ import annotations
from typing import Any
def A ( __UpperCamelCase ) -> int:
if not postfix_notation:
return 0
A__ = {'+', '-', '*', '/'}
A__ = []
for token in postfix_notation:
if token in operations:
A__ , A__ = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
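A de-obfuscated rendering of the evaluator above, kept minimal so the truncation-toward-zero division branch is easy to verify:
def evaluate_postfix(tokens):
    # Stack-based evaluation of integer postfix expressions.
    stack = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python's // floors, so correct toward zero for mixed signs.
                stack.append(a // b + 1 if a * b < 0 and a % b != 0 else a // b)
        else:
            stack.append(int(token))
    return stack.pop()
print(evaluate_postfix(["-7", "2", "/"]))  # -3, not floor's -4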
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=_snake_case , )
assert hasattr(self , 'env' )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__UpperCAmelCase = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_snake_case , instance_count=_snake_case , instance_type=self.instance_type , debugger_hook_config=_snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_snake_case , py_version='py36' , )
def __lowerCamelCase ( self , __A ):
TrainingJobAnalytics(_snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = self.create_estimator(_snake_case )
# run training
estimator.fit()
# result dataframe
__UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _snake_case )
| 126 |
from __future__ import annotations
def A ( __UpperCamelCase = 4 ) -> list[list[int]]:
A__ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = matrix[::-1]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = [x[::-1] for x in matrix]
return matrix
def A ( __UpperCamelCase ) -> None:
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 9 | 0 |
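A quick sanity check of the composition used above for the 90-degree counterclockwise rotation (transpose, then reverse the row order):
m = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*m)]  # [[1, 3], [2, 4]]
rotated_ccw = transposed[::-1]               # reverse_row -> [[2, 4], [1, 3]]
assert rotated_ccw == [[2, 4], [1, 3]]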
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__magic_name__ : Any = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def UpperCamelCase (SCREAMING_SNAKE_CASE=None ):
if subparsers is not None:
UpperCamelCase : Optional[Any] = subparsers.add_parser("""tpu-config""" , description=_description )
else:
UpperCamelCase : List[str] = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
UpperCamelCase : Optional[Any] = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=__UpperCamelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=__UpperCamelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
UpperCamelCase : str = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=__UpperCamelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
UpperCamelCase : Any = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCamelCase : Optional[int] = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase : Any = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase : Dict = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
UpperCamelCase : Dict = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , __UpperCamelCase ):
UpperCamelCase : int = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
UpperCamelCase : Optional[int] = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCamelCase ):
UpperCamelCase : Optional[Any] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase : Optional[int] = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCamelCase : Union[str, Any] = """; """.join(__UpperCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase : List[str] = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {" ".join(__UpperCamelCase )}""" )
return
subprocess.run(__UpperCamelCase )
print("""Successfully setup pod.""" )
def UpperCamelCase ():
UpperCamelCase : List[str] = tpu_command_parser()
UpperCamelCase : Union[str, Any] = parser.parse_args()
tpu_command_launcher(__UpperCamelCase )
| 102 |
from __future__ import annotations
from fractions import Fraction
def A ( __UpperCamelCase , __UpperCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A ( __UpperCamelCase ) -> list[str]:
A__ = []
A__ = 11
A__ = int('1' + '0' * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
A__ = 10
return solutions
def A ( __UpperCamelCase = 2 ) -> int:
A__ = 1.0
for fraction in fraction_list(__UpperCamelCase ):
A__ = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 9 | 0 |
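A de-obfuscated check of the predicate above; the search should find exactly the four classic non-trivial digit-cancelling fractions:
def is_digit_cancelling(num, den):
    # Same test as above: the shared digit cancels and the value is unchanged.
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
found = [f"{n}/{d}" for n in range(10, 100) for d in range(n + 1, 100)
         if d % 10 != 0 and is_digit_cancelling(n, d)]
print(found)  # ['16/64', '19/95', '26/65', '49/98']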
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[str]:
"""simple docstring"""
__snake_case = set()
# keep track of all the paths to be checked
__snake_case = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__snake_case = queue.pop(0 )
# get the last node from the path
__snake_case = path[-1]
if node not in explored:
__snake_case = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__snake_case = list(__UpperCamelCase )
new_path.append(__UpperCamelCase )
queue.append(__UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCamelCase )
# in case there's no path between the 2 nodes
return []
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__snake_case = [start]
__snake_case = set(__UpperCamelCase )
# Keep tab on distances from `start` node.
__snake_case = {start: 0, target: -1}
while queue:
__snake_case = queue.pop(0 )
if node == target:
__snake_case = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCamelCase )
queue.append(__UpperCamelCase )
__snake_case = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 163 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 0 |
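The `_LazyModule` used above defers the heavy torch imports until a name is first accessed; a simplified sketch of the idea (not the real transformers implementation):
import importlib
import types
class LazyModule(types.ModuleType):
    # Resolve names from submodules on first attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)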
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : str = "▁" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "<unk>" , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "</s>" , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "<pad>" , ):
lowerCamelCase__ = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
lowerCamelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase__ = token_dict["""token"""]
lowerCamelCase__ = Tokenizer(Unigram() )
lowerCamelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
lowerCamelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case ),
pre_tokenizers.Digits(individual_digits=_snake_case ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase__ = decoders.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case )
lowerCamelCase__ = TemplateProcessing(
single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
lowerCamelCase__ = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(_snake_case , _snake_case )
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , SCREAMING_SNAKE_CASE_ : int = 8000 , SCREAMING_SNAKE_CASE_ : bool = True , ):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = [files]
self._tokenizer.train(_snake_case , trainer=_snake_case )
self.add_unk_id()
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Union[Iterator[str], Iterator[Iterator[str]]] , SCREAMING_SNAKE_CASE_ : int = 8000 , SCREAMING_SNAKE_CASE_ : bool = True , ):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
self._tokenizer.train_from_iterator(_snake_case , trainer=_snake_case )
self.add_unk_id()
def __UpperCAmelCase ( self : Union[str, Any] ):
lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""]
lowerCamelCase__ = Tokenizer.from_str(json.dumps(_snake_case ) )
| 129 |
SCREAMING_SNAKE_CASE__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
SCREAMING_SNAKE_CASE__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 9 | 0 |
'''simple docstring'''
def snake_case ( a_ : Optional[int] = 4_000_000 ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = [0, 1]
UpperCamelCase_ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCamelCase_ : Tuple = 0
for j in range(len(__UpperCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 208 |
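An independent check of the Project Euler #2 logic above: the even Fibonacci terms not exceeding 100 are 2, 8, and 34:
a, b, total = 1, 2, 0
while b <= 100:
    if b % 2 == 0:
        total += b
    a, b = b, a + b
assert total == 44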
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : Any , **_snake_case : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
| 9 | 0 |
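Outside the test harness, the slow test above reduces to a short pipeline call; the image path comes from the test fixtures and the scores are the approximate values the test asserts:
from transformers import pipeline
vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
              question="How many cats are there?", top_k=2)
print(answers)  # ~[{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]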
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
__A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A , __A = emb.weight.shape
__A = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
__A = emb.weight.data
return lin_layer
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = torch.load(__UpperCamelCase , map_location="cpu" )
__A = mam_aaa["args"] or mam_aaa["cfg"]["model"]
__A = mam_aaa["model"]
remove_ignore_keys_(__UpperCamelCase )
__A = state_dict["encoder.embed_tokens.weight"].shape[0]
__A = MaMaaaConfig(
vocab_size=__UpperCamelCase , max_position_embeddings=1_0_2_4 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
__A = state_dict["decoder.embed_tokens.weight"]
__A = MaMaaaForConditionalGeneration(__UpperCamelCase )
model.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
__A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE :Tuple = parser.parse_args()
    SCREAMING_SNAKE_CASE :Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 55 |
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
A__ = _modexpt(__UpperCamelCase , exponent // 2 , __UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__UpperCamelCase , exponent - 1 , __UpperCamelCase )) % modulo_value
def A ( __UpperCamelCase = 1_777 , __UpperCamelCase = 1_855 , __UpperCamelCase = 8 ) -> int:
A__ = base
for _ in range(1 , __UpperCamelCase ):
A__ = _modexpt(__UpperCamelCase , __UpperCamelCase , 10**digits )
return result
if __name__ == "__main__":
print(f'{solution() = }')
| 9 | 0 |
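The recursive helper above is plain square-and-multiply; a de-obfuscated copy checked against Python's built-in three-argument pow:
def modexpt(base, exponent, modulo):
    # Square-and-multiply modular exponentiation, mirroring the logic above.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = modexpt(base, exponent // 2, modulo) % modulo
        return (x * x) % modulo
    return (base * modexpt(base, exponent - 1, modulo)) % modulo
assert modexpt(3, 10, 1000) == pow(3, 10, 1000) == 49  # 3**10 = 59049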
import math
def a_ ( __lowerCAmelCase , __lowerCAmelCase ):
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 615 |
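A one-line numeric check of Malus's law as implemented above: a polarizer at 60 degrees passes a quarter of the incident intensity:
import math
assert math.isclose(100 * math.cos(math.radians(60)) ** 2, 25.0)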
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Dict:
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers ):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw )
    return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(__UpperCamelCase )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
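# Hedged usage sketch (added; the script filename `convert_dit_unilm_to_pytorch.py`
# and the output folder name are assumptions for illustration -- the checkpoint URL
# is just the argparse default above):
# python convert_dit_unilm_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base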
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa" )
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1 )
        self.assertEqual(
            outputs, [
                [{"score": ANY(float ), "answer": ANY(str )}],
                [{"score": ANY(float ), "answer": ANY(str )}],
            ], )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2 )
        self.assertEqual(
            outputs, [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2 )
        self.assertEqual(
            outputs, [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2 )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2 )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
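# Hedged usage note (added): with the hook names restored above, an individual test
# can be selected with something like `pytest -k test_small_model_pt` from the
# repository root.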
| 310 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
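# Note (added): both helpers are plain breadth-first searches, so each runs in
# O(V + E) time and O(V) space for a graph with V vertices and E edges.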
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 9 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n        >>> chrf = datasets.load_metric(\"chrf\")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n        >>> chrf = datasets.load_metric(\"chrf\")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n        >>> chrf = datasets.load_metric(\"chrf\")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric ):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing )
        output = sb_chrf.corpus_score(predictions, transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 624 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right )
def __assert_sorted(collection):
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
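# Note (added): on uniformly distributed keys interpolation search probes roughly
# O(log log n) positions on average, but it degrades to O(n) on adversarial inputs.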
if __name__ == "__main__":
import sys
    debug = 0
if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
| 9 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle )
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
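# Hedged usage sketch (added; the script filename and both paths are assumptions
# for illustration):
# python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./unispeech_base.pt \
#     --pytorch_dump_folder_path ./unispeech-hf --not_finetuned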
| 295 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor ):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 9 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    # Bolzano's theorem: a sign change is required for a root to lie in [a, b]
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("""Wrong space!""" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
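# Both calls above bracket the positive root of 10 - x * x, so each converges to
# sqrt(10) ~= 3.1623 within the 0.01 interval tolerance used by `bisection`.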
| 601 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
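    # Note (added): fuzz.fuzzy_or/fuzzy_and return a (universe, membership) pair,
    # which is why the [1] index above keeps only the membership values.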
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 9 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue = PriorityQueue()
    queue_backward: PriorityQueue = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
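# Note (added): the two searches can safely stop once their frontier costs together
# meet or exceed the best candidate path, i.e. when
# cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance in the loop above.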
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 126 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        examples = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        """simple docstring"""
        outputs = object_detector(examples[0], threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n, 0 )
        self.assertEqual(
            outputs, [
                {
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                }
                for i in range(n )
            ], )
    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        """simple docstring"""
        pass
    @require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
        outputs = object_detector(
            [
                {
                    'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                }
            ], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        """simple docstring"""
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        """simple docstring"""
        pass
    @require_torch
    @slow
    def test_threshold(self):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 9 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor, tokenizer )
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["""boxes"""], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""pixel_values"""] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        '''simple docstring'''
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
    @property
    def model_input_names(self):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
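# Hedged usage sketch (added; the checkpoint name, words and boxes are illustrative,
# and apply_ocr=False so that words/boxes can be supplied explicitly):
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
# encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")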
| 102 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE__ = NewType('''DataClass''', Any)
SCREAMING_SNAKE_CASE__ = NewType('''DataClassType''', Any)
def string_to_bool(v) -> bool:
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg )
def HfArg( *,
    aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs, ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs )
class HfArgumentParser(ArgumentParser ):
    """simple docstring"""
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
        field_name = f'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases', [] )
        if isinstance(aliases, str ):
            aliases = [aliases]
        origin_type = getattr(field.type, '__origin__', field.type )
        if origin_type is Union or (hasattr(types, 'UnionType' ) and isinstance(origin_type, types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type ) and issubclass(field.type, Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type, list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f'''--no_{field.name}''', action='store_false', dest=field.name, **bool_kwargs )
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
        if hasattr(dtype, '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str, sys.version_info[:3] ) )
                raise RuntimeError(
                    f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field )
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ), None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace, k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        """Alternative helper that does not use `argparse` at all, filling the dataclasses from a dict."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        """Alternative helper that loads a json file and fills the dataclasses from it."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        """Alternative helper that loads a yaml file and fills the dataclasses from it."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
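# Usage sketch (not part of the class above; assumes the parser class is exported as
# HfArgumentParser, as its error messages state, and the dataclass below is purely illustrative):
if __name__ == "__main__":
    from dataclasses import dataclass, field
    @dataclass
    class ExampleArguments:
        learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
        do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})
    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--do_eval"])
    assert example_args.learning_rate == 1e-4 and example_args.do_eval is True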
"""simple docstring"""
import numpy
# List of input, output pairs
_SCREAMING_SNAKE_CASE = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_SCREAMING_SNAKE_CASE = (((515, 22, 13), 555), ((61, 35, 49), 150))
_SCREAMING_SNAKE_CASE = [2, 4, 1, 5]
_SCREAMING_SNAKE_CASE = len(train_data)
_SCREAMING_SNAKE_CASE = 0.009
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="train" ) -> str:
"""simple docstring"""
return calculate_hypothesis_value(__UpperCamelCase , __UpperCamelCase ) - output(
__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
__snake_case = 0
for i in range(len(__UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=m ) -> Optional[int]:
"""simple docstring"""
__snake_case = 0
for i in range(__UpperCamelCase ):
if index == -1:
summation_value += _error(__UpperCamelCase )
else:
summation_value += _error(__UpperCamelCase ) * train_data[i][0][index]
return summation_value
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
__snake_case = summation_of_cost_derivative(__UpperCamelCase , __UpperCamelCase ) / m
return cost_derivative_value
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__snake_case = 0.000_002
__snake_case = 0
__snake_case = 0
while True:
j += 1
__snake_case = [0, 0, 0, 0]
for i in range(0 , len(__UpperCamelCase ) ):
__snake_case = get_cost_derivative(i - 1 )
__snake_case = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__UpperCamelCase , __UpperCamelCase , atol=__UpperCamelCase , rtol=__UpperCamelCase , ):
break
__snake_case = temp_parameter_vector
print(("Number of iterations:", j) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
for i in range(len(__UpperCamelCase ) ):
print(("Actual output value:", output(__UpperCamelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__UpperCamelCase , "test" )) )
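# Optional sanity check (not part of the original script): compare the learned parameters
# against an ordinary least-squares fit of the same training data, e.g.
# print(least_squares_baseline()) after run_gradient_descent().
def least_squares_baseline():
    a = numpy.array([[1, *x] for x, _ in train_data], dtype=float)  # bias column first
    b = numpy.array([y for _, y in train_data], dtype=float)
    coeffs, *_ = numpy.linalg.lstsq(a, b, rcond=None)
    return coeffs  # [theta_0, theta_1, theta_2, theta_3]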
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")
    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
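# flatten_yaml_as_dict example (illustrative): nested mappings are joined with ".":
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
#   -> {"model.classification.name": "mobilevit_v2"}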
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
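# Example of a rename produced by the rules above (traced through the rules, not a checkpoint):
#   "conv_1.block.conv.weight" -> "mobilevitv2.conv_stem.convolution.weight"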
def remove_unused_keys(state_dict):
    """Remove keys that have no counterpart in the HF model (the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys from the original state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task the MobileViTV2 model you\'d like to convert was trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
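# Example invocation (script and path names are placeholders for illustration):
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf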
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
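# Minimal usage sketch (illustrative; requires the torch + transformers extras guarded above):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("an astronaut riding a horse").images[0]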
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Clean the model-doc table of content: remove duplicates and sort models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    # Extract the modalities and clean them one by one
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
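# Example (hypothetical input): duplicate "local" entries with a single distinct title
# are collapsed and the result is sorted by title, so
#   clean_model_doc_toc([{"local": "model_doc/bert", "title": "BERT"},
#                        {"local": "model_doc/bert", "title": "BERT"},
#                        {"local": "model_doc/albert", "title": "ALBERT"}])
# returns [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}].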
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_ : Tuple = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_,  # the expected-encoding dict assigned above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
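# To run this module alone (path given for illustration; it is the usual location in the
# transformers repo): python -m pytest tests/models/xlnet/test_tokenization_xlnet.py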
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
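# Inference sketch distilled from the integration tests above (same public checkpoint):
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height / 4, width / 4)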
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
    tgt = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
    tgt = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    no_aggregation_metrics = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(no_aggregation_metrics, defaultdict)
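# Note on return shapes (inferred from the assertions above): with the default
# bootstrap_aggregation=True, calculate_rouge returns a plain dict of aggregated scores;
# with bootstrap_aggregation=False it returns a defaultdict of per-example scores that
# pandas can consume directly, as in test_disaggregated_scores_are_determinstic.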
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
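# Wiring sketch (illustrative): these callbacks plug into a pytorch_lightning Trainer, e.g.
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )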
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Compute whichever of shear stress, tangential force, or contact area is passed as zero."""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
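# Worked examples (these follow directly from tau = F / A; values chosen for illustration):
#   shear_stress(stress=25, tangential_force=100, area=0)      -> ("area", 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)    -> ("stress", 8.0)
#   shear_stress(stress=1000, tangential_force=0, area=1200)   -> ("tangential_force", 1200000.0)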
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["input_values", "attention_mask"]
    def __init__( self : str , feature_size : int = 1 , sampling_rate : int = 16_000 , padding_value : float = 0.0 , do_normalize : bool = False , num_mel_bins : int = 80 , hop_length : int = 16 , win_length : int = 64 , win_function : str = "hann_window" , frame_signal_scale : float = 1.0 , fmin : float = 80 , fmax : float = 7_600 , mel_floor : float = 1E-10 , reduction_factor : int = 2 , return_attention_mask : bool = True , **kwargs : Union[str, Any] , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values : List[np.ndarray] , attention_mask : List[np.ndarray] , padding_value : float = 0.0 ):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features( self : Tuple , one_waveform : np.ndarray , ):
        """simple docstring"""
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
    def __call__( self : List[str] , audio : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , audio_target : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , **kwargs : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
if inputs is None:
return inputs_target
else:
            inputs['labels'] = inputs_target['input_values']
            decoder_attention_mask = inputs_target.get('attention_mask' )
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
return inputs
    def _process_audio( self : Tuple , speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , is_target : bool = False , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Tuple , ):
        """simple docstring"""
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(speech , dtype=np.float32 ) for speech in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            inputs = BatchFeature({'input_values': features} )
            self.feature_size = self.num_mel_bins
        else:
            inputs = BatchFeature({'input_values': speech} )
        padded_inputs = self.pad(
            inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
# convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs['input_values'] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs['input_values'] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs['input_values'] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
# zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
return padded_inputs
    def to_dict( self : Optional[Any] ):
        """simple docstring"""
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
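# Added usage sketch (not in the original file): the class above mirrors transformers'
# SpeechT5FeatureExtractor; with the released class, extraction looks like this
# (random noise stands in for real 16 kHz audio).
if __name__ == "__main__":
    from transformers import SpeechT5FeatureExtractor

    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16_000).astype(np.float32)  # one second at 16 kHz
    inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
    targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
    print(inputs["input_values"].shape)   # (1, 16000) padded waveform batch
    print(targets["input_values"].shape)  # (1, n_frames, 80) log-mel spectrogram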
| 9 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase ( Dataset ):
    def __init__(self : List[str] , params : Optional[int] , data : List[str] ) -> List[Any]:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self : str , index : str ) -> Union[str, Any]:
        return (self.token_ids[index], self.lengths[index])
def __len__(self : Union[str, Any] ) -> str:
return len(self.lengths )
    def check(self : Union[str, Any] ) -> List[Any]:
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self : str ) -> Optional[int]:
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f'Splitting {sum(idxs )} too long sequences.' )
        def divide_chunks(l : List[Any] , n : Optional[int] ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id , sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences(self : List[Any] ) -> Union[str, Any]:
        init_size = len(self )
        indices = self.lengths > 1_1
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def remove_unknown_sequences(self : Union[str, Any] ) -> List[Any]:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
    def print_statistics(self : str ) -> Tuple:
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self : Optional[int] , batch : List[str] ) -> Dict:
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(lengths )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
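# Added usage example (not part of the original file): a toy `params` namespace with
# just the attributes the dataset reads, fed through a DataLoader that uses
# batch_sequences as its collate function.
if __name__ == "__main__":
    from types import SimpleNamespace
    from torch.utils.data import DataLoader

    params = SimpleNamespace(
        mlm=False,
        is_master=True,
        max_model_input_size=128,
        special_tok_ids={"bos_token": 0, "eos_token": 1, "unk_token": 2},
    )
    data = [np.array([0] + [5] * 20 + [1]) for _ in range(64)]
    dataset = UpperCAmelCase(params, data)  # class name as (mangled) above
    loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)
    token_ids, lengths = next(iter(loader))
    print(token_ids.shape, lengths.shape)  # torch.Size([8, 22]) torch.Size([8])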
| 310 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = OmegaConf.load(__UpperCamelCase )
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
A__ = list(state_dict.keys() )
# extract state_dict for VQVAE
A__ = {}
A__ = 'first_stage_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
# extract state_dict for UNetLDM
A__ = {}
A__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(__UpperCamelCase ):
A__ = state_dict[key]
A__ = config.model.params.first_stage_config.params
A__ = config.model.params.unet_config.params
A__ = VQModel(**__UpperCamelCase ).eval()
vqvae.load_state_dict(__UpperCamelCase )
A__ = UNetLDMModel(**__UpperCamelCase ).eval()
unet.load_state_dict(__UpperCamelCase )
A__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__UpperCamelCase , )
A__ = LDMPipeline(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
pipeline.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 9 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowercase__ ( lowercase_ ) -> str:
    """simple docstring"""
    lowercase_ = re.sub("<n>" , "" , lowercase_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowercase_ ) )
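# Added usage example (not in the original file): one sentence per line, as rougeLsum
# scoring expects; requires the nltk "punkt" data downloaded above.
if __name__ == "__main__":
    print(lowercase__("Pegasus is mythical. It is pure white."))
    # Pegasus is mythical.
    # It is pure white.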
| 624 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet ( hor ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
        json.dump(config , f )
def value_function ( ) -> List[str]:
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 9 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
def convert_dialogpt_checkpoint ( checkpoint_path , pytorch_dump_folder_path ) -> Any:
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
_UpperCAmelCase : Any = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_UpperCAmelCase : str = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
_UpperCAmelCase : List[Any] = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
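    # Added example (not in the original script): the whole conversion is a single key
    # rename inside the checkpoint dict, illustrated here on a toy dictionary.
    _demo = {OLD_KEY: "weight-tensor-placeholder"}
    _demo[NEW_KEY] = _demo.pop(OLD_KEY)
    assert list(_demo) == [NEW_KEY]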
| 295 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
    """simple docstring"""
    def __init__( self : Dict , parent : Union[str, Any] , batch_size : Optional[Any]=12 , seq_length : Any=7 , is_training : List[str]=True , use_input_mask : int=True , use_labels : int=True , vocab_size : Tuple=99 , hidden_size : List[Any]=32 , projection_dim : Optional[int]=32 , num_hidden_layers : List[str]=2 , num_attention_heads : List[str]=4 , intermediate_size : List[Any]=37 , dropout : Union[str, Any]=0.1 , attention_dropout : Tuple=0.1 , max_position_embeddings : Dict=5_12 , initializer_range : Union[str, Any]=0.02 , bos_token_id : Any=0 , scope : Optional[Any]=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self : Optional[Any] ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self : Tuple ):
        """simple docstring"""
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self : int , config : Union[str, Any] , input_ids : Any , input_mask : List[str] ):
        """simple docstring"""
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self : str ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( TFModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp( self : Any ):
        """simple docstring"""
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self : List[str] ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self : Union[str, Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self : Tuple ):
        """simple docstring"""
        pass
    def test_training_gradient_checkpointing( self : int ):
        """simple docstring"""
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds( self : Any ):
        """simple docstring"""
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base( self : str ):
        """simple docstring"""
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base( self : Optional[Any] ):
        """simple docstring"""
        pass
    @slow
    def test_model_from_pretrained( self : Union[str, Any] ):
        """simple docstring"""
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self : int , allow_missing_keys : bool=True ):
        """simple docstring"""
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Dict:
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ) -> List[Any]:
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> int:
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def create_and_test_config_common_properties( self ) -> Optional[int]:
        return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
    def test_keras_fit( self ) -> List[Any]:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ) -> Any:
pass
    def test_forward_signature( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> List[Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_model_outputs_equivalence( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
    def test_for_image_classification( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Optional[int]:
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase):
@cached_property
    def default_image_processor( self ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> int:
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
| 601 |
from __future__ import annotations
from typing import Any
def evaluate_postfix ( postfix_notation ) -> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
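    # Added usage example (not in the original file): (2 + 3) * 4 in postfix notation.
    print(evaluate_postfix(["2", "3", "+", "4", "*"]))  # 20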
| 9 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main ( )-> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print('Processing...' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(F'/{file_root}.jpg' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F'Success {index+1}/{len(new_images )} with {file_name}' )
        annos_list = []
        for anno in new_annos[index]:
            obj = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj )
        with open(F'/{file_root}.txt' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset ( label_dir , img_dir )-> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
        img_paths.append(img_path )
        labels.append(boxes )
return img_paths, labels
def update_image_and_anno ( img_list , anno_list , flip_type = 1 )-> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars ( number_char = 32 )-> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 126 |
from __future__ import annotations
def make_matrix ( row_size = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix ) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix ) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix ) -> None:
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
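    # Added example (not in the original file): a 2x2 sanity check of rotate_90;
    # make_matrix(2) builds [[1, 2], [3, 4]].
    assert rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]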
| 9 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    slow_tokenizer_class = None
    tokenizer_class = BloomTokenizerFast
    rust_tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        TARGET_TOKENS = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["""input_ids"""]
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(INPUT_SENTENCES , decoded_tokens )
    def test_padding( self , max_length=6 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = """This is a simple input"""
                sa = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                pa = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(sa , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(pa , max_length=max_length )
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding="""max_length""" , )
    def test_encodings_from_xnli_dataset( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=True )
        sample_data = next(iter(ds ) )["""premise"""]  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ):
        '''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 102 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling ( num , den ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list ( digit_len ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution ( digit_len = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
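    # Added example (not in the original file): the four non-trivial digit-cancelling
    # fractions behind the Project Euler 33 answer of 100.
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']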
| 9 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __magic_name__ ( DiffusionPipeline ):
    _optional_components = ["vqvae"]
    def __init__( self : int , vqvae : AutoencoderKL , unet : UNet2DConditionModel , mel : Mel , scheduler : Union[DDIMScheduler, DDPMScheduler] , ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self : Tuple ):
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
    def __call__( self : List[str] , batch_size : int = 1 , audio_file : str = None , raw_audio : np.ndarray = None , slice : int = 0 , start_step : int = 0 , steps : int = None , generator : torch.Generator = None , mask_start_secs : float = 0 , mask_end_secs : float = 0 , step_generator : torch.Generator = None , eta : float = 0 , noise : torch.Tensor = None , encoding : torch.Tensor = None , return_dict : bool = True , ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__snake_case = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__snake_case = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_snake_case , device=self.device , )
__snake_case = noise
__snake_case = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_snake_case , _snake_case )
__snake_case = self.mel.audio_slice_to_image(_snake_case )
__snake_case = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
__snake_case = (input_image / 255) * 2 - 1
__snake_case = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__snake_case = self.vqvae.encode(torch.unsqueeze(_snake_case , 0 ) ).latent_dist.sample(
generator=_snake_case )[0]
__snake_case = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__snake_case = self.scheduler.add_noise(_snake_case , _snake_case , self.scheduler.timesteps[start_step - 1] )
__snake_case = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__snake_case = int(mask_start_secs * pixels_per_second )
__snake_case = int(mask_end_secs * pixels_per_second )
__snake_case = self.scheduler.add_noise(_snake_case , _snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _snake_case ):
__snake_case = self.unet(_snake_case , _snake_case , _snake_case )["sample"]
else:
__snake_case = self.unet(_snake_case , _snake_case )["sample"]
if isinstance(self.scheduler , _snake_case ):
__snake_case = self.scheduler.step(
model_output=_snake_case , timestep=_snake_case , sample=_snake_case , eta=_snake_case , generator=_snake_case , )["prev_sample"]
else:
__snake_case = self.scheduler.step(
model_output=_snake_case , timestep=_snake_case , sample=_snake_case , generator=_snake_case , )["prev_sample"]
if mask is not None:
if mask_start > 0:
__snake_case = mask[:, step, :, :mask_start]
if mask_end > 0:
__snake_case = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__snake_case = 1 / self.vqvae.config.scaling_factor * images
__snake_case = self.vqvae.decode(_snake_case )["sample"]
__snake_case = (images / 2 + 0.5).clamp(0 , 1 )
__snake_case = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__snake_case = (images * 255).round().astype("uint8" )
__snake_case = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_snake_case , mode="RGB" ).convert("L" ) for _ in images) )
__snake_case = [self.mel.image_to_audio(_snake_case ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(_snake_case ) )
@torch.no_grad()
    def encode( self : Optional[Any] , images : List[Image.Image] , steps : int = 50 ):
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp( x0 : torch.Tensor , x1 : torch.Tensor , alpha : float ):
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
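# Added usage sketch (not in the original file): slerp blends two latent noise tensors
# on the unit sphere; the pipeline uses it to interpolate between starting noises.
if __name__ == "__main__":
    x0 = torch.randn(1, 3, 8, 8)
    x1 = torch.randn(1, 3, 8, 8)
    halfway = __magic_name__.slerp(x0, x1, 0.5)  # class name as (mangled) above
    print(halfway.shape)  # torch.Size([1, 3, 8, 8])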
| 163 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map ( dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter ( dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter ( ):
    """simple docstring"""
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda __lowercase : None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda __lowercase : None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda __lowercase : None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda __lowercase : None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda __lowercase : None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 129 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 9 | 0 |
'''simple docstring'''
import math
def is_prime ( number : int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( nth : int = 10_001 ) -> int:
    """simple docstring"""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"{solution() = }")
| 208 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args : Any , **kwargs : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self : List[Any] , model : Union[str, Any] , tokenizer : Tuple , processor : Union[str, Any] ):
        """simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        examples = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
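
# Standalone usage sketch (assumption: the standard transformers pipeline API;
# the model id, image URL and question are the ones used by the slow test above):
def _example_vqa_usage():
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return vqa(
        image="http://images.cocodataset.org/val2017/000000039769.jpg",
        question="How many cats are there?",
        top_k=2,
    )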
| 9 | 0 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Precompute the odd composite numbers below 100_001.
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first `n` odd composites that cannot be written as
    prime + 2 * i * i (Goldbach's other conjecture, Project Euler 46)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # No representation was found; this odd composite is a counterexample.
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Returns the smallest odd composite that cannot be written as prime + 2*i*i."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f'''{solution() = }''')
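    # Added sanity checks (sketch): 5777 and 5993 are the well-known Project
    # Euler 46 counterexamples, so solution() should return the first of them.
    assert solution() == 5777
    assert compute_nums(2) == [5777, 5993]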
| 55 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Computes (base ** exponent) % modulo_value by recursive fast squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the hyperexponentiation (tetration)
    of `base` by `height` (Project Euler 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
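    # Added cross-check (sketch): _modexpt must agree with Python's built-in
    # three-argument pow() on sample inputs.
    assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)
    assert _modexpt(1_777, 1_855, 10**8) == pow(1_777, 1_855, 10**8)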
| 9 | 0 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ : Optional[Any] = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (UpperCAmelCase_ ):
lowercase_ : Any = "mask2former"
lowercase_ : int = ["swin"]
lowercase_ : Union[str, Any] = {"hidden_size": "hidden_dim"}
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Dict] = None , __lowerCamelCase : int = 2_56 , __lowerCamelCase : int = 2_56 , __lowerCamelCase : int = 2_56 , __lowerCamelCase : int = 10_24 , __lowerCamelCase : str = "relu" , __lowerCamelCase : int = 6 , __lowerCamelCase : int = 10 , __lowerCamelCase : int = 8 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 20_48 , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : int = 4 , __lowerCamelCase : int = 2_55 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 2.0 , __lowerCamelCase : float = 5.0 , __lowerCamelCase : float = 5.0 , __lowerCamelCase : int = 1_25_44 , __lowerCamelCase : float = 3.0 , __lowerCamelCase : float = 0.75 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : List[int] = [4, 8, 16, 32] , __lowerCamelCase : bool = None , **__lowerCamelCase : str , ):
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
lowerCAmelCase__ = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_snake_case , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase__ = backbone_config.pop('''model_type''' )
lowerCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {','.join(self.backbones_supported )}""" )
lowerCAmelCase__ = backbone_config
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = mask_feature_size
lowerCAmelCase__ = hidden_dim
lowerCAmelCase__ = encoder_feedforward_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = encoder_layers
lowerCAmelCase__ = decoder_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = dropout
lowerCAmelCase__ = dim_feedforward
lowerCAmelCase__ = pre_norm
lowerCAmelCase__ = enforce_input_projection
lowerCAmelCase__ = common_stride
lowerCAmelCase__ = ignore_value
lowerCAmelCase__ = num_queries
lowerCAmelCase__ = no_object_weight
lowerCAmelCase__ = class_weight
lowerCAmelCase__ = mask_weight
lowerCAmelCase__ = dice_weight
lowerCAmelCase__ = train_num_points
lowerCAmelCase__ = oversample_ratio
lowerCAmelCase__ = importance_sample_ratio
lowerCAmelCase__ = init_std
lowerCAmelCase__ = init_xavier_std
lowerCAmelCase__ = use_auxiliary_loss
lowerCAmelCase__ = feature_strides
lowerCAmelCase__ = output_auxiliary_logits
lowerCAmelCase__ = decoder_layers
super().__init__(**_snake_case )
@classmethod
def A__ ( cls : Union[str, Any] , __lowerCamelCase : PretrainedConfig , **__lowerCamelCase : Tuple ):
"""simple docstring"""
return cls(
backbone_config=_snake_case , **_snake_case , )
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.backbone_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
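
# Added usage sketch (assumption: upstream this config is published as
# transformers.Mask2FormerConfig; the public name is used here because the
# class above carries an anonymized identifier):
def _example_mask2former_config():
    from transformers import Mask2FormerConfig

    cfg = Mask2FormerConfig()  # falls back to the default Swin backbone
    assert cfg.backbone_config.model_type == "swin"
    # round-trip through the to_dict() serialization shown above
    return Mask2FormerConfig.from_dict(cfg.to_dict())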
| 615 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Dict:
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
A__ = 'backbone.' if is_semantic else ''
# queries, keys and values
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A__ = gamma_a
A__ = gamma_a
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( ) -> Dict:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(__UpperCamelCase )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
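# Example invocation (a sketch, assuming this file is saved as
# convert_dit_checkpoint.py; the URL is the script's own --checkpoint_url default):
#   python convert_dit_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base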
| 9 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase : int = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase_ ):
def __init__(self : Dict , *A__ : Optional[int] , **A__ : Optional[Any] ) -> Tuple:
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 310 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Returns one shortest path between `start` and `goal`, or [] if none exists."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Returns the number of edges on a shortest path from `start` to `target`
    (-1 for invalid input, 0 when start == target)."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
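    # Added checks (sketch): they assert the expected outputs noted in the
    # two comments above.
    assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
    assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4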
| 9 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
print("Loading config file..." )
def flatten_yaml_as_dict(lowercase_ ,lowercase_="" ,lowercase_="." ):
_UpperCamelCase : Union[str, Any] = []
for k, v in d.items():
_UpperCamelCase : Tuple = parent_key + sep + k if parent_key else k
if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(__UpperCamelCase )
_UpperCamelCase : Dict = argparse.Namespace()
with open(__UpperCamelCase ,"r" ) as yaml_file:
try:
_UpperCamelCase : Dict = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader )
_UpperCamelCase : List[Any] = flatten_yaml_as_dict(__UpperCamelCase )
for k, v in flat_cfg.items():
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(__UpperCamelCase ,str(__UpperCamelCase ) ) )
return config
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : str = MobileViTVaConfig()
_UpperCamelCase : Dict = False
# dataset
if task_name.startswith("imagenet1k_" ):
_UpperCamelCase : Optional[int] = 1_000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCamelCase : Any = 384
else:
_UpperCamelCase : Dict = 256
_UpperCamelCase : List[str] = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
_UpperCamelCase : Tuple = 21_000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCamelCase : Optional[int] = 384
else:
_UpperCamelCase : Tuple = 256
_UpperCamelCase : Union[str, Any] = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
_UpperCamelCase : Optional[int] = 151
_UpperCamelCase : Tuple = 512
_UpperCamelCase : List[Any] = "ade20k-id2label.json"
_UpperCamelCase : Tuple = True
elif task_name.startswith("voc_" ):
_UpperCamelCase : Optional[Any] = 21
_UpperCamelCase : Dict = 512
_UpperCamelCase : Union[str, Any] = "pascal-voc-id2label.json"
_UpperCamelCase : Tuple = True
# orig_config
_UpperCamelCase : Optional[Any] = load_orig_config_file(__UpperCamelCase )
assert getattr(__UpperCamelCase ,"model.classification.name" ,-1 ) == "mobilevit_v2", "Invalid model"
_UpperCamelCase : Optional[Any] = getattr(__UpperCamelCase ,"model.classification.mitv2.width_multiplier" ,1.0 )
assert (
getattr(__UpperCamelCase ,"model.classification.mitv2.attn_norm_layer" ,-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_UpperCamelCase : Any = getattr(__UpperCamelCase ,"model.classification.activation.name" ,"swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_UpperCamelCase : Any = getattr(__UpperCamelCase ,"model.segmentation.output_stride" ,16 )
if "_deeplabv3" in task_name:
_UpperCamelCase : str = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_rates" ,[12, 24, 36] )
_UpperCamelCase : Tuple = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_out_channels" ,512 )
_UpperCamelCase : str = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_dropout" ,0.1 )
# id2label
_UpperCamelCase : int = "huggingface/label-files"
_UpperCamelCase : Union[str, Any] = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : Tuple = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = idalabel
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = dct.pop(__UpperCamelCase )
_UpperCamelCase : Any = val
def lowercase__ ( lowercase_ ,lowercase_=False ) -> Dict:
"""simple docstring"""
if base_model:
_UpperCamelCase : Any = ""
else:
_UpperCamelCase : int = "mobilevitv2."
_UpperCamelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_UpperCamelCase : Tuple = k[8:]
else:
_UpperCamelCase : List[str] = k
if ".block." in k:
_UpperCamelCase : Union[str, Any] = k_new.replace(".block." ,"." )
if ".conv." in k:
_UpperCamelCase : List[str] = k_new.replace(".conv." ,".convolution." )
if ".norm." in k:
_UpperCamelCase : str = k_new.replace(".norm." ,".normalization." )
if "conv_1." in k:
_UpperCamelCase : str = k_new.replace("conv_1." ,F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_UpperCamelCase : str = k_new.replace(F'''layer_{i}.''' ,F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_UpperCamelCase : int = k_new.replace(".exp_1x1." ,".expand_1x1." )
if ".red_1x1." in k:
_UpperCamelCase : Any = k_new.replace(".red_1x1." ,".reduce_1x1." )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_UpperCamelCase : str = k_new.replace(F'''layer_{i}.0.''' ,F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
_UpperCamelCase : int = k_new.replace(F'''layer_{i}.1.local_rep.0.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
_UpperCamelCase : List[Any] = k_new.replace(F'''layer_{i}.1.local_rep.1.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_UpperCamelCase : Tuple = [0, 1]
elif i == 4:
_UpperCamelCase : int = [0, 1, 2, 3]
elif i == 5:
_UpperCamelCase : Dict = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_UpperCamelCase : Union[str, Any] = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' ,F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_UpperCamelCase : int = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' ,F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
_UpperCamelCase : Optional[Any] = k_new.replace(F'''layer_{i}.1.conv_proj.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_UpperCamelCase : Tuple = k_new.replace("pre_norm_attn.0." ,"layernorm_before." )
if "pre_norm_attn.1." in k:
_UpperCamelCase : List[str] = k_new.replace("pre_norm_attn.1." ,"attention." )
if "pre_norm_ffn.0." in k:
_UpperCamelCase : Union[str, Any] = k_new.replace("pre_norm_ffn.0." ,"layernorm_after." )
if "pre_norm_ffn.1." in k:
_UpperCamelCase : str = k_new.replace("pre_norm_ffn.1." ,"ffn.conv1." )
if "pre_norm_ffn.3." in k:
_UpperCamelCase : Tuple = k_new.replace("pre_norm_ffn.3." ,"ffn.conv2." )
if "classifier.1." in k:
_UpperCamelCase : str = k_new.replace("classifier.1." ,"classifier." )
if "seg_head." in k:
_UpperCamelCase : Any = k_new.replace("seg_head." ,"segmentation_head." )
if ".aspp_layer." in k:
_UpperCamelCase : Dict = k_new.replace(".aspp_layer." ,"." )
if ".aspp_pool." in k:
_UpperCamelCase : str = k_new.replace(".aspp_pool." ,"." )
rename_keys.append((k, k_new) )
return rename_keys
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(__UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_UpperCamelCase : Optional[int] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase )
# load original state_dict
_UpperCamelCase : Dict = torch.load(__UpperCamelCase ,map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
_UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
_UpperCamelCase : List[Any] = False
else:
_UpperCamelCase : List[Any] = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
_UpperCamelCase : List[str] = False
    # remove and rename some keys of the loaded original model
_UpperCamelCase : Tuple = checkpoint
remove_unused_keys(__UpperCamelCase )
_UpperCamelCase : Tuple = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load modified state_dict
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCamelCase : Tuple = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
_UpperCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : Union[str, Any] = model(**__UpperCamelCase )
# verify classification model
if task_name.startswith("imagenet" ):
_UpperCamelCase : Optional[int] = outputs.logits
_UpperCamelCase : Any = logits.argmax(-1 ).item()
print("Predicted class:" ,model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_UpperCamelCase : Optional[Any] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
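# Example invocation (a sketch, assuming this file is saved as
# convert_mobilevitv2_checkpoint.py; the flag names come from the argparse
# block above, while the file paths are placeholders):
#   python convert_mobilevitv2_checkpoint.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0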
| 624 |
def interpolation_search(sorted_collection, item):
    """Iterative interpolation search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; `left` and `right` bound the slice still being searched."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # the probe undershoots the window, so narrow the bounds to [point, left]
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        # the probe overshoots the window, so narrow the bounds to [right, point]
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raises ValueError if `collection` is not sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('''Sequence must be ascending sorted to apply interpolation search''')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'{target} found at positions: {result}')
    else:
        print('''Not found''')
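    # Added checks (sketch): position 5 holds 66 in the demo collection, and
    # both search variants should agree on it.
    assert interpolation_search(collection, 66) == 5
    assert interpolation_search_by_recursion(collection, 66, 0, len(collection) - 1) == 5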
| 9 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[int] ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : str , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ) -> List[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : str , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> List[str]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ) -> List[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Any , *UpperCAmelCase : List[str] , **UpperCAmelCase : Any ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ) -> str:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ) -> Tuple:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> Tuple:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Any ) -> List[str]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : List[str] , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ) -> List[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : int ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Dict , *UpperCAmelCase : int , **UpperCAmelCase : Dict ) -> List[Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Any , **UpperCAmelCase : List[str] ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ) -> Any:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : int ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : Any ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['flax'] )
class lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
UpperCAmelCase__ = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : str , **UpperCAmelCase : Dict ) -> Optional[int]:
requires_backends(cls , ['flax'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
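
# Added sketch of the idea behind these placeholder classes (simplified; the
# real requires_backends helper in transformers.utils does more bookkeeping):
def _requires_backends_sketch(obj, backends):
    import importlib.util

    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(
            f"{type(obj).__name__} requires the missing backend(s): {', '.join(missing)}"
        )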
| 295 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a__ : Tuple = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
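
# Added sketch of the lazy-import machinery used above (a simplified stand-in
# for transformers' _LazyModule; all names below are illustrative only):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")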
| 601 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A: str = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A: str = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_A: Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 126 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _a ( self : int , _snake_case : int , _snake_case : List[str] ):
"""simple docstring"""
A__ = object_detector(examples[0] , threshold=0.0 )
A__ = len(_snake_case )
self.assertGreater(_snake_case , 0 )
self.assertEqual(
_snake_case , [
{
'score': ANY(_snake_case ),
'label': ANY(_snake_case ),
'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )},
}
for i in range(_snake_case )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@require_torch
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
A__ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
A__ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : int ):
"""simple docstring"""
pass
@require_torch
@slow
def _a ( self : str ):
"""simple docstring"""
A__ = 0.2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = 2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
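
# Standalone usage sketch (assumption: the standard transformers pipeline API;
# the image URL, candidate labels and threshold mirror the slow tests above):
def _example_zero_shot_detection():
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )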
| 9 | 0 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__magic_name__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class lowercase__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , **_A ):
'''simple docstring'''
super().__init__(**_snake_case )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(_snake_case )
def _a ( self , **_A ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = {}
UpperCamelCase : Optional[int] = {}
UpperCamelCase : int = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase : Tuple = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
UpperCamelCase : List[str] = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
UpperCamelCase : int = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase : Union[str, Any] = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase : int = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase : Dict = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
UpperCamelCase : Optional[Any] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
UpperCamelCase : List[Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
UpperCamelCase : Optional[Any] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
UpperCamelCase : List[Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
UpperCamelCase : int = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
UpperCamelCase : Dict = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , _A , *_A , _A=None , _A=None , **_A ):
'''simple docstring'''
return super().__call__(_snake_case , *_snake_case , num_workers=_snake_case , batch_size=_snake_case , **_snake_case )
def _a ( self , _A , _A=6_4 , _A = 0 , _A = 5_1_2 / 1_5_0_0 , _A = 3_2 , _A = 1 , ):
'''simple docstring'''
UpperCamelCase : Any = load_image(_snake_case )
UpperCamelCase : Any = self.image_processor.size["""longest_edge"""]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = self.image_processor.generate_crop_boxes(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
UpperCamelCase : Optional[int] = self.image_processor(images=_snake_case , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase : Union[str, Any] = self.get_inference_context()
with inference_context():
UpperCamelCase : Optional[Any] = self._ensure_tensor_on_device(_snake_case , device=self.device )
UpperCamelCase : Any = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
UpperCamelCase : Union[str, Any] = image_embeddings
UpperCamelCase : str = grid_points.shape[1]
UpperCamelCase : Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , _snake_case , _snake_case ):
UpperCamelCase : Any = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase : Optional[int] = input_labels[:, i : i + points_per_batch]
UpperCamelCase : Dict = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a ( self , _A , _A=0.88 , _A=0.95 , _A=0 , _A=1 , ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = model_inputs.pop("""input_boxes""" )
UpperCamelCase : Optional[int] = model_inputs.pop("""is_last""" )
UpperCamelCase : Optional[Any] = model_inputs.pop("""original_sizes""" ).tolist()
UpperCamelCase : Dict = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
UpperCamelCase : str = self.model(**_snake_case )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase : Any = model_outputs["""pred_masks"""]
UpperCamelCase : List[Any] = self.image_processor.post_process_masks(
_snake_case , _snake_case , _snake_case , _snake_case , binarize=_snake_case )
UpperCamelCase : str = model_outputs["""iou_scores"""]
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _snake_case , _snake_case , _snake_case , _snake_case , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 102 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Maps each choice to its string representation so argparse can parse it back."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
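# Minimal usage sketch for the parser above (added for illustration; the
# dataclass and its field names here are hypothetical, not from this file):
#
#   @dataclasses.dataclass
#   class TrainingConfig:
#       learning_rate: float = HfArg(default=3e-5, help="Peak learning rate.")
#       do_train: bool = HfArg(default=False, help="Whether to run training.")
#
#   parser = HfArgumentParser(TrainingConfig)
#   (config,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--do_train"])
#   assert config.learning_rate == 1e-4 and config.do_train is True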
| 9 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence) -> str:
    """Replaces the last `occurrence` occurrences of `old` with `new` in `s`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Renames original DALL-E encoder keys to match the FlavaImageCodebook layout."""
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
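# Example invocation (added for illustration; the script filename is an
# assumption, and the checkpoint URL is the publicly hosted DALL-E encoder,
# which may move):
#
#   python convert_flava_codebook_to_pytorch.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook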
| 163 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Removes unused keys (e.g. seg_head.aux_head) from the state dict."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys to load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
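# Example invocation (added for illustration only; the script filename and the
# local checkpoint/config paths are assumptions):
#
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256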
| 9 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
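# Example invocation (added for illustration; the script filename is an
# assumption, the checkpoint names are common public models):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2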
| 129 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
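# Illustrative behaviour of clean_model_doc_toc on a toy section (added as a
# sketch, not from the original file): duplicate "local" entries are merged
# and the result is sorted by lowercased title.
#
#   toc = [
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ]
#   assert clean_model_doc_toc(toc) == [
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ]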
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
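# With the _LazyModule registration above, heavy submodules are only imported
# on first attribute access. A quick way to see this (illustrative sketch):
#
#   import transformers.models.megatron_bert as mb
#   config_cls = mb.MegatronBertConfig  # triggers the real import lazily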
| 208 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
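# The slow integration tests above can be run in isolation with pytest, e.g.
# (illustrative command; the test file path is an assumption, and the run
# needs network access to download the checkpoints):
#
#   RUN_SLOW=1 pytest tests/models/segformer/test_modeling_segformer.py -k "integration" -s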
| 9 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
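# End-to-end usage of the processor under test (illustrative sketch mirroring
# what the assertions above exercise; `image` is any PIL image):
#
#   processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
#   # -> dict with "pixel_values", "input_ids" and "attention_mask" tensors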
| 55 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, you can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
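# Typical wiring of the callbacks above into a Lightning trainer (illustrative
# sketch; `output_dir` and `model` are placeholders, not from this file):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )
#   trainer.fit(model)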
| 9 | 0 |
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
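# Minimal usage sketch (added for illustration): DownloadManager caches files
# locally and returns the cached path.
#
#   dl_manager = DownloadManager()
#   local_path = dl_manager.download("https://example.com/data.csv")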
| 615 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechTaFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
def _a ( self : Tuple , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = False , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = isinstance(_snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
A__ = speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [speech]
# needed to make pad() work on spectrogram inputs
A__ = self.feature_size
# convert into correct format for padding
if is_target:
A__ = [self._extract_mel_features(_snake_case ) for waveform in speech]
A__ = BatchFeature({'input_values': features} )
A__ = self.num_mel_bins
else:
A__ = BatchFeature({'input_values': speech} )
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = feature_size_hack
# convert input values to correct format
A__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
A__ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
A__ = input_values.astype(np.floataa )
# convert attention_mask to correct format
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A__ = (
attention_mask
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_snake_case , padding_value=self.padding_value )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
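

# A minimal standalone sketch of the framing/window/FFT/log stages the class
# wires together (window length, hop size and the demo input are illustrative
# assumptions; the mel filter-bank projection is omitted for brevity):
def naive_log_spectrogram(waveform: np.ndarray, n_fft: int = 400, hop: int = 160) -> np.ndarray:
    window = np.hanning(n_fft)
    frames = [
        waveform[start : start + n_fft] * window
        for start in range(0, len(waveform) - n_fft + 1, hop)
    ]
    spec = np.abs(np.fft.rfft(np.stack(frames), axis=-1))  # magnitude per frame
    return np.log10(np.maximum(spec, 1e-10))  # floor before the log, as above


# Example call (one second of noise at 16 kHz):
# naive_log_spectrogram(np.random.randn(16_000).astype(np.float32))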
| 9 | 0 |
"""Prim's minimum spanning tree: a list-based and a heap-based variant."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex_id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices ``a`` and ``b`` (1-indexed) with an edge of the given weight."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Prim's algorithm with a plain list as the priority queue."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap as the priority queue."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
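
    # Usage sketch: a 3-vertex triangle (the weights are illustrative).
    example_graph = [Vertex(i) for i in range(3)]
    connect(example_graph, 1, 2, 1)
    connect(example_graph, 2, 3, 2)
    connect(example_graph, 1, 3, 4)
    print(prim(example_graph, example_graph[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(example_graph, example_graph[0])))  # same MST edges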
| 310 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
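
# Invocation sketch (the script name and all paths below are placeholders,
# not taken from the original repository):
#   python convert_original_ldm.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm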
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = len(__UpperCamelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_UpperCamelCase : Optional[int] = i + 1
else:
_UpperCamelCase : int = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 624 |
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
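
# Both converters rename checkpoint keys purely by position: the source and
# target state dicts enumerate parameters in the same order, so zipping the
# key sequences yields the old-to-new mapping. The trick in isolation (toy
# names, illustrative only):
#
#   old_sd = {"blocks.0.w": 1, "blocks.0.b": 2}
#   new_keys = ["layers.0.weight", "layers.0.bias"]
#   renamed = {new: old_sd[old] for old, new in zip(old_sd, new_keys)}
#   # {'layers.0.weight': 1, 'layers.0.bias': 2}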
| 9 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
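

# A sketch of a concrete subcommand built on the base class above (all names
# here are illustrative, not taken from the original file):
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is expected to be the sub-parsers action of the root parser
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)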
| 295 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 9 | 0 |
"""Fetch quotes from the zenquotes.io API."""
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
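

# requests.get without a timeout can hang indefinitely; a defensive variant
# (the 10-second timeout is an illustrative choice, not part of the original):
def random_quotes_safe() -> list:
    try:
        return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()
    except requests.RequestException:
        return []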
| 601 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
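
    # Usage sketch: "(3 + 5) * 2" in postfix form.
    print(evaluate_postfix(["3", "5", "+", "2", "*"]))  # 16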
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
_A : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __A , __A , __A ):
__UpperCAmelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
__UpperCAmelCase = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = object_detector(examples[0] , threshold=0.0 )
__UpperCAmelCase = len(_snake_case )
self.assertGreater(_snake_case , 0 )
self.assertEqual(
_snake_case , [
{
'score': ANY(_snake_case ),
'label': ANY(_snake_case ),
'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )},
}
for i in range(_snake_case )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
def __lowerCamelCase ( self ):
__UpperCAmelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
__UpperCAmelCase = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
__UpperCAmelCase = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = pipeline('zero-shot-object-detection' )
__UpperCAmelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
__UpperCAmelCase = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = 0.2
__UpperCAmelCase = pipeline('zero-shot-object-detection' )
__UpperCAmelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = 2
__UpperCAmelCase = pipeline('zero-shot-object-detection' )
__UpperCAmelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
| 126 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
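
    # Sanity check (illustrative): four successive 90-degree rotations are
    # the identity.
    matrix = make_matrix(3)
    assert rotate_90(rotate_90(rotate_90(rotate_90(matrix)))) == matrix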
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__magic_name__ : Tuple = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__magic_name__ : str = BASE_URL + """/user"""
# https://github.com/settings/tokens
__magic_name__ : Union[str, Any] = os.environ.get("""USER_TOKEN""", """""")
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = {
"""Authorization""": f"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""\'USER_TOKEN\' field cannot be empty.""")
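
# Running the script requires a personal access token in the environment,
# e.g. (the token value and file name below are placeholders):
#   USER_TOKEN=ghp_xxxx python fetch_github_info.py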
| 102 |
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
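
    # The four curious two-digit fractions are 16/64, 19/95, 26/65 and 49/98;
    # their product simplifies to 1/100, so solution() returns 100.
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']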
| 9 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCamelCase ( *SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
__snake_case = list(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
__snake_case = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
__snake_case = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can\'t allocate memory", # CPU OOM
]
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1_28 ) -> List[str]:
"""simple docstring"""
if function is None:
return functools.partial(__UpperCamelCase , starting_batch_size=__UpperCamelCase )
__snake_case = starting_batch_size
def decorator(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__snake_case = list(inspect.signature(__UpperCamelCase ).parameters.keys() )
# Guard against user error
if len(__UpperCamelCase ) < (len(__UpperCamelCase ) + 1):
__snake_case = ", ".join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
except Exception as e:
if should_reduce_batch_size(__UpperCamelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
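

# Usage sketch: the decorated function must take `batch_size` as its first
# parameter and is called *without* it (the body below is illustrative):
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the training loop
#
#   train()  # retries with 64, 32, 16, ... until no memory error is raised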
| 163 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 9 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = a.name
lowerCamelCase__ = b.name
lowerCamelCase__ = """"""
lowerCamelCase__ = """"""
lowerCamelCase__ = a == b
lowerCamelCase__ = name_a
lowerCamelCase__ = name_b
return res
def _A ( __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__UpperCamelCase , __UpperCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __UpperCamelCase , __UpperCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
def _A ( __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _A ( __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = list(model.graph.initializer )
lowerCamelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase__ = inits[i].name
lowerCamelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __UpperCamelCase , __UpperCamelCase )
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = os.path.dirname(__UpperCamelCase )
lowerCamelCase__ = os.path.basename(__UpperCamelCase )
lowerCamelCase__ = onnx.load(os.path.join(__UpperCamelCase , __UpperCamelCase ) )
lowerCamelCase__ = list(model.graph.initializer )
lowerCamelCase__ = set()
lowerCamelCase__ = {}
lowerCamelCase__ = []
lowerCamelCase__ = 0
for i in range(len(__UpperCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__UpperCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__UpperCamelCase )
dup_set.add(__UpperCamelCase )
lowerCamelCase__ = inits[j].data_type
lowerCamelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , __UpperCamelCase )
total_reduced_size += mem_size
lowerCamelCase__ = inits[i].name
lowerCamelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__UpperCamelCase )
else:
lowerCamelCase__ = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowerCamelCase__ = sorted(__UpperCamelCase )
_remove_dup_initializers_from_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase__ = """optimized_""" + model_file_name
lowerCamelCase__ = os.path.join(__UpperCamelCase , __UpperCamelCase )
onnx.save(__UpperCamelCase , __UpperCamelCase )
return new_model
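

# Usage sketch ("model.onnx" is a placeholder path):
#   optimized_path = remove_dup_initializers("model.onnx")
#   print(optimized_path)  # "optimized_model.onnx" in the same folder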
| 129 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 208 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
| 9 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
snake_case_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCamelCase_ ( self : Dict ,A : str=0 ):
__A = np.random.RandomState(_snake_case )
__A = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase_ ( self : List[str] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Dict ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
__A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[Any] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
__A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[str] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
__A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Tuple ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
__A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[str] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
__A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = pipe(**_snake_case ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__A = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Dict ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = 3 * [inputs["prompt"]]
# forward
__A = pipe(**_snake_case )
__A = output.images[0, -3:, -3:, -1]
__A = self.get_dummy_inputs()
__A = 3 * [inputs.pop("prompt" )]
__A = pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="np" ,)
__A = text_inputs["input_ids"]
__A = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__A = prompt_embeds
# forward
__A = pipe(**_snake_case )
__A = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase_ ( self : List[Any] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_snake_case )
__A = self.get_dummy_inputs()
__A = 3 * ["this is a negative prompt"]
__A = negative_prompt
__A = 3 * [inputs["prompt"]]
# forward
__A = pipe(**_snake_case )
__A = output.images[0, -3:, -3:, -1]
__A = self.get_dummy_inputs()
__A = 3 * [inputs.pop("prompt" )]
__A = []
for p in [prompt, negative_prompt]:
__A = pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="np" ,)
__A = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__A , __A = embeds
# forward
__A = pipe(**_snake_case )
__A = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self : Optional[Any] ):
__A = ort.SessionOptions()
__A = False
return options
def UpperCamelCase_ ( self : Dict ):
__A = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="onnx" ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_snake_case )
__A = "A painting of a squirrel eating a burger"
np.random.seed(0 )
__A = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="np" )
__A = output.images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__A = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : Dict ):
__A = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,subfolder="scheduler" ,revision="onnx" )
__A = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,revision="onnx" ,scheduler=_snake_case ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_snake_case )
__A = "open neural network exchange"
__A = np.random.RandomState(0 )
__A = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_snake_case ,output_type="np" )
__A = output.images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__A = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : List[str] ):
__A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,subfolder="scheduler" ,revision="onnx" )
__A = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,revision="onnx" ,scheduler=_snake_case ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=_snake_case )
__A = "open neural network exchange"
__A = np.random.RandomState(0 )
__A = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_snake_case ,output_type="np" )
__A = output.images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__A = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : Tuple ):
__A = 0
def test_callback_fn(A : int ,A : int ,A : np.ndarray ) -> None:
__A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__A = latents[0, -3:, -3:, -1]
__A = np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__A = latents[0, -3:, -3:, -1]
__A = np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
__A = False
__A = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,revision="onnx" ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=_snake_case )
__A = "Andromeda galaxy in a bottle"
__A = np.random.RandomState(0 )
pipe(
prompt=_snake_case ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=_snake_case ,callback=_snake_case ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase_ ( self : Optional[int] ):
__A = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,revision="onnx" ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(_snake_case ,_snake_case )
assert pipe.safety_checker is None
__A = pipe("example prompt" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
__A = OnnxStableDiffusionPipeline.from_pretrained(_snake_case )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__A = pipe("example prompt" ,num_inference_steps=2 ).images[0]
assert image is not None
| 55 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the hyperexponentiation base^^height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
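
    # _modexpt agrees with Python's built-in three-argument pow, which can
    # serve as a quick cross-check:
    assert _modexpt(3, 20, 1_000) == pow(3, 20, 1_000)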
| 9 | 0 |
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 615 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
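# Example invocation (the script filename is illustrative, not taken from this file):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base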
| 9 | 0 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
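# Note on the conversion above: building `mapping` with dict(zip(...)) pairs
# parameters purely by iteration order, so it assumes the original and the
# diffusers state dicts enumerate their tensors in the same order. A safer
# variant would also compare tensor shapes before renaming.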
| 310 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes; return an
    empty list if no path exists."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path between `start` and
    `target`, or -1 if no path exists."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
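# Complexity note: both helpers are plain breadth-first searches, so each runs
# in O(V + E) time. `queue.pop(0)` is O(n) on a Python list; swapping the list
# for collections.deque (popleft) would make each dequeue O(1) on large graphs.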
| 9 | 0 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if number > 0:
raise ValueError("input must be a negative integer" )
_UpperCamelCase : Any = len(bin(__UpperCamelCase )[3:] )
_UpperCamelCase : Dict = bin(abs(__UpperCamelCase ) - (1 << binary_number_length) )[3:]
_UpperCamelCase : Optional[int] = (
(
"1"
+ "0" * (binary_number_length - len(__UpperCamelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
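# Worked example (matches the implementation above): the magnitude of -5 is
# "101" (3 bits), so the value is widened to 4 bits and the complement is 1011,
# i.e. 16 - 5 in 4-bit arithmetic.
#
#   >>> twos_complement(-5)
#   '0b1011'
#   >>> twos_complement(-1)
#   '0b11'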
| 624 |
def interpolation_search(sorted_collection, item):
    """Search for `item` in an ascending `sorted_collection`; return its
    index, or None if the item is not found."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search; start the first call with left=0 and
    right=len(sorted_collection) - 1. The collection must be ascending sorted."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
SCREAMING_SNAKE_CASE__ = 0
if debug == 1:
SCREAMING_SNAKE_CASE__ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
SCREAMING_SNAKE_CASE__ = 6_7
SCREAMING_SNAKE_CASE__ = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
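# Complexity note: interpolation search probes index
# left + (item - A[left]) * (right - left) // (A[right] - A[left]), which
# averages O(log log n) comparisons on uniformly distributed keys but can
# degrade to O(n) on heavily skewed data.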
| 9 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Preprocess `dataset_path` and remove the remote filesystem prefix (e.g. "s3://")."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Rename `src` to `dst`, using an efficient move when the filesystem is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Set fsspec's internal locks and event-loop references back to a fresh
    state, which is needed after forking a process.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
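# Usage sketch (illustrative; the S3 filesystem requires the optional s3fs
# dependency checked for above):
#
#   import fsspec
#
#   extract_path_from_uri("s3://my-bucket/my-dataset")  # -> "my-bucket/my-dataset"
#   fs = fsspec.filesystem("s3")
#   is_remote_filesystem(fs)  # -> True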
| 295 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
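# Migration sketch: callers can switch to the replacement class directly; the
# constructor and `from_pretrained` arguments are unchanged.
#
#   from transformers import CLIPImageProcessor
#
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")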
| 9 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR-decode `ciphertext` with a cycled `key`; return None as soon as a
    decoded character falls outside the valid range."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep the decodings made
    entirely of valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain `common_word`."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Decrypt the cipher text and return the sum of the ASCII values of the
    decrypted characters."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
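# Note: XOR encryption is an involution, (c ^ k) ^ k == c, which is why
# try_key() decodes with the same operation the cipher used to encode.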
| 601 |
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 9 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 126 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        """A do-nothing stand-in so the module imports without vision dependencies."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
| 9 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
    # so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 102 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Create a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """Parse command-line args into instances of the specified dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
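# Usage sketch (illustrative; `TrainingArgs` below is a made-up dataclass, not
# a transformers class):
#
#   from dataclasses import dataclass, field
#
#   @dataclass
#   class TrainingArgs:
#       learning_rate: float = field(default=3e-4, metadata={"help": "peak learning rate"})
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4"])
#   assert training_args.learning_rate == 1e-4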
| 9 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
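# Usage sketch: `attribute_map` lets callers read the generic config names
# while BLOOM stores its native ones.
#
#   config = BloomConfig(n_layer=4, n_head=8, hidden_size=64)
#   assert config.num_hidden_layers == 4  # alias for n_layer
#   assert config.num_attention_heads == 8  # alias for n_head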
| 163 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key( dct , old , new ) -> List[str]:
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict , base_model=False ) -> Dict:
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
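# --- Sketch of how the (old, new) pairs returned above are consumed together
# with rename_key (tensor values below are placeholders, not real weights):
import torch

state_dict = {"conv_1.weight": torch.zeros(3)}
for src, dest in [("conv_1.weight", "mobilevitv2.conv_stem.weight")]:
    state_dict[dest] = state_dict.pop(src)  # the same move rename_key performs
assert list(state_dict) == ["mobilevitv2.conv_stem.weight"]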
def remove_unused_keys( state_dict ) -> Tuple:
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img( ) -> str:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ) -> Optional[Any]:
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys to load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
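# --- Hypothetical invocation (script name and paths are placeholders, not
# taken from the original documentation):
# python convert_mobilevitv2_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2-1.0.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf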
| 9 | 0 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__magic_name__ = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig( PretrainedConfig ):
    model_type = "align_text_model"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig( PretrainedConfig ):
    model_type = "align_vision_model"
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.2_5 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.0_2 , batch_norm_eps = 0.0_0_1 , batch_norm_momentum = 0.9_9 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig( PretrainedConfig ):
    model_type = "align"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config : AlignTextConfig , vision_config : AlignVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
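# --- Minimal composition sketch (toy hyper-parameters; assumes the three
# classes above are in scope under these names):
config = AlignConfig(text_config={"vocab_size": 100}, vision_config={"image_size": 64})
assert config.text_config.vocab_size == 100
assert config.vision_config.image_size == 64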
| 129 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_model_doc_toc( model_doc ) -> Optional[Any]:
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicated keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
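# --- Toy illustration (assumed input) of the dedup rule implemented above:
# identical `local` targets collapse to one entry when their titles agree.
model_doc_example = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
]
assert clean_model_doc_toc(model_doc_example) == [
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]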
def check_model_doc( overwrite=False ) -> str:
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
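# --- Illustrative `_toctree.yml` fragment (made-up entries) showing the layout
# the checker above walks: an "API" section containing a "Models" section whose
# modality subsections each carry their own `sections` list of {local, title}:
#
# - title: API
#   sections:
#   - title: Models
#     sections:
#     - title: Text models
#       sections:
#       - local: model_doc/albert
#         title: ALBERT
#       - local: model_doc/bert
#         title: BERT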
| 9 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
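# --- Minimal sketch of the `is_*_available` pattern the guarded imports above
# rely on (importlib-based; this is an illustration, not the library's actual
# implementation):
import importlib.util

def _is_available(pkg_name: str) -> bool:
    # a package "is available" when Python can locate it without importing it
    return importlib.util.find_spec(pkg_name) is not None

if _is_available("torch"):
    pass  # safe to import torch-dependent helpers here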
| 208 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
"""simple docstring"""
    def create_and_test_config_common_properties( self : List[str] ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
        self.parent.assertTrue(hasattr(config , 'num_encoder_blocks' ) )
class SegformerModelTester :
"""simple docstring"""
    def __init__( self : Any , parent : str , batch_size : Union[str, Any]=13 , image_size : Any=64 , num_channels : Optional[Any]=3 , num_encoder_blocks : Dict=4 , depths : Tuple=[2, 2, 2, 2] , sr_ratios : str=[8, 4, 2, 1] , hidden_sizes : Union[str, Any]=[16, 32, 64, 1_28] , downsampling_rates : int=[1, 4, 8, 16] , num_attention_heads : List[str]=[1, 2, 4, 8] , is_training : int=True , use_labels : int=True , hidden_act : Union[str, Any]="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , initializer_range : Dict=0.02 , num_labels : Tuple=3 , scope : int=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self : int ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : int ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self : int , config : Optional[Any] , pixel_values : int , labels : Any ):
        """simple docstring"""
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self : Union[str, Any] , config : Union[str, Any] , pixel_values : Tuple , labels : Dict ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self : List[str] , config : Optional[Any] , pixel_values : Union[str, Any] , labels : List[str] ):
        """simple docstring"""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp( self : Union[str, Any] ):
        """simple docstring"""
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
    def test_config( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_binary_image_segmentation( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
    def test_segmentation( self : Tuple ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes( self : Dict ):
"""simple docstring"""
pass
    def test_forward_signature( self : Dict ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self : Dict ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 1 , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output( self : Union[str, Any] ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict : Dict , config : int , model_class : List[Any] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_training( self : Tuple ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_multi_gpu_data_parallel_forward( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained( self : Tuple ):
        """simple docstring"""
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> str:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class SegformerModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_image_segmentation_ade( self : Dict ):
        """simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_image_segmentation_city( self : Optional[Any] ):
        """simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-1 ) )
    @slow
    def test_post_processing_semantic_segmentation( self : Any ):
        """simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(5_00, 3_00)] )
        expected_shape = torch.Size((5_00, 3_00) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((1_28, 1_28) )
        self.assertEqual(segmentation[0].shape , expected_shape )
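# --- Standalone arithmetic check (example numbers) for the attention shapes
# asserted in the tests above: with spatial-reduction ratio sr, SegFormer's
# keys/values shrink by sr**2 while queries keep the full patch count.
image_size, patch_stride, sr_ratio, num_heads = 64, 4, 8, 1
seq_len = (image_size // patch_stride) ** 2                       # 16 * 16 = 256 queries
reduced_seq_len = (image_size // (patch_stride * sr_ratio)) ** 2  # 2 * 2 = 4 keys
assert (num_heads, seq_len, reduced_seq_len) == (1, 256, 4)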
| 9 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum( x ) -> List[str]: # picklable for multiprocessing
    """simple docstring"""
    return x.sum()
def add_one( i ) -> Optional[Any]: # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@dataclass
class A :
    '''simple docstring'''
    x: int
    y: str
class PyUtilsTest ( TestCase ):
'''simple docstring'''
    def test_map_nested( self : List[str] ):
__A = {}
__A = []
__A = 1
__A = [1, 2]
__A = {"a": 1, "b": 2}
__A = {"a": [1, 2], "b": [3, 4]}
__A = {"a": {"1": 1}, "b": 2}
__A = {"a": 1, "b": 2, "c": 3, "d": 4}
__A = {}
__A = []
__A = 2
__A = [2, 3]
__A = {"a": 2, "b": 3}
__A = {"a": [2, 3], "b": [4, 5]}
__A = {"a": {"1": 2}, "b": 3}
__A = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ) ,_snake_case )
__A = 2
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(map_nested(_snake_case ,_snake_case ,num_proc=_snake_case ) ,_snake_case )
__A = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
__A = {"a": 2, "b": 0, "c": 2}
__A = {
"a": np.eye(2 ).astype(_snake_case ),
"b": np.zeros(3 ).astype(_snake_case ),
"c": np.ones(2 ).astype(_snake_case ),
}
self.assertEqual(map_nested(_snake_case ,_snake_case ,map_numpy=_snake_case ) ,_snake_case )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_snake_case ,_snake_case ,map_numpy=_snake_case ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
self.assertEqual(map_nested(_snake_case ,_snake_case ,map_numpy=_snake_case ,num_proc=_snake_case ) ,_snake_case )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_snake_case ,_snake_case ,map_numpy=_snake_case ,num_proc=_snake_case ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
with self.assertRaises(_snake_case ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 ,_snake_case ,num_proc=_snake_case )
    def test_zip_dict( self : Optional[int] ):
        da = {"a": 1, "b": 2}
        db = {"a": 3, "b": 4}
        dc = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(da ,db ,dc ) ) ,expected_zip_dict_result )
    def test_temporary_assignment( self : Tuple ):
        class Foo :
            '''simple docstring'''
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr ,"bar" )
        with temporary_assignment(foo ,"my_attr" ,"BAR" ):
            self.assertEqual(foo.my_attr ,"BAR" )
        self.assertEqual(foo.my_attr ,"bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
] , )
def test_map_nested_num_proc( iterable_length , num_proc , expected_num_proc ) -> List[str]:
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        mapped = map_nested(lambda x : x + 1_0 , data_struct , num_proc=num_proc , parallel_min_length=1_6 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
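# --- The dispatch rule exercised by the table above, restated as a standalone
# sketch (a reconstruction for illustration, not the library's source):
def effective_num_proc(iterable_length, num_proc, parallel_min_length=16):
    if num_proc is None:
        num_proc = 1
    if iterable_length < parallel_min_length or num_proc <= 1:
        return 1                               # falls back to _single_map_nested
    return min(num_proc, iterable_length)      # never spawn more workers than items

assert effective_num_proc(2, 3) == 1      # too few items to parallelize
assert effective_num_proc(16, 17) == 16   # capped by the number of items
assert effective_num_proc(17, 16) == 16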
class TempSeedTest ( TestCase ):
'''simple docstring'''
@require_tf
    def test_tensorflow( self : Optional[int] ):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 ,set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 ,set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 ,out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() ,0 )
@require_torch
    def test_torch( self : Optional[Any] ):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 ,2 )
            x = torch.rand(1 ,3 )
            return model(x ).detach().numpy()
        with temp_seed(42 ,set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 ,set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 ,out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() ,0 )
    def test_numpy( self : List[Any] ):
        def gen_random_output():
            return np.random.rand(1 ,3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 ,out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() ,0 )
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data( input_data ) -> List[str]:
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten( data , expected_output ) -> str:
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict( ) -> Tuple:
    """simple docstring"""
    input = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input ) == expected_output
    input = {"a": {"b": A(x=1_0 , y="foo" )}, "c": [A(x=2_0 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 1_0, "y": "foo"}}, "c": [{"x": 2_0, "y": "bar"}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=1_0 , y="foo" )] )
def _split_text( text ) -> str:
"""simple docstring"""
return text.split()
def _aseconds_generator_of_aitems_with_timing( content ) -> Tuple:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered( ) -> Any:
"""simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 1_0 ) )
        assert out.count("hello" ) == 1_0
        assert out.count("there" ) == 1_0
        assert len(out ) == 2_0
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 1_0 ) )
        assert out.count("hello" ) == 1_0
        assert out.count("there" ) == 1_0
        assert len(out ) == 2_0
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 55 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> Optional[int]:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> Dict:
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'''
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> Any:
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
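# --- Hypothetical wiring sketch (output directory and patience values are
# made up; Seq2SeqLoggingCallback is defined just below, hence comment form):
# callbacks = [
#     get_checkpoint_callback("outputs", metric="rouge2"),
#     get_early_stopping_callback(metric="rouge2", patience=3),
#     Seq2SeqLoggingCallback(),
# ]
# trainer = pl.Trainer(max_epochs=1, callbacks=callbacks)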
class Seq2SeqLoggingCallback( pl.Callback ):
"""simple docstring"""
def _a ( self : Dict , _snake_case : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_snake_case )
@rank_zero_only
def _a ( self : Union[str, Any] , _snake_case : pl.Trainer , _snake_case : pl.LightningModule , _snake_case : str , _snake_case : Optional[Any]=True ):
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / 'test_results.txt'
A__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
A__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_snake_case )
generations_file.parent.mkdir(exist_ok=_snake_case )
with open(_snake_case , 'a+' ) as writer:
for key in sorted(_snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(_snake_case , torch.Tensor ):
A__ = val.item()
A__ = F'''{key}: {val:.6f}\n'''
writer.write(_snake_case )
if not save_generations:
return
if "preds" in metrics:
A__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_snake_case )
@rank_zero_only
def _a ( self : Dict , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _a ( self : int , _snake_case : pl.Trainer , _snake_case : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_snake_case , _snake_case , 'test' )
@rank_zero_only
def _a ( self : Optional[Any] , _snake_case : pl.Trainer , _snake_case : List[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 9 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self : Optional[Any] ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self : int ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a( self : List[Any] ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim( self : str ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self : List[str] ):
        """simple docstring"""
        return 1_00
    @property
    def dummy_tokenizer( self : Any ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer
    @property
    def dummy_text_encoder( self : str ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self : int ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self : Optional[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self : List[Any] ):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self : str , device : int , seed : List[str]=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_inpaint( self : List[str] ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self : Dict ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests (unittest.TestCase ):
    def tearDown( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self : Union[str, Any] ):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((7_68, 7_68) , dtype=np.floataa )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
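# --- The expected-slice checking pattern used above, restated as a standalone
# sketch (toy arrays, not real pipeline outputs):
import numpy as _np

image = _np.zeros((1, 64, 64, 3), dtype=_np.float32)
image_slice = image[0, -3:, -3:, -1]             # bottom-right 3x3 patch, last channel
expected_slice = _np.zeros(9, dtype=_np.float32) # reference values recorded once
assert _np.abs(image_slice.flatten() - expected_slice).max() < 1e-2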
| 615 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechTaFeatureExtractor( SequenceFeatureExtractor ):
"""simple docstring"""
    model_input_names = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
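# A minimal usage sketch for the feature extractor above (illustrative, not part of
# the source file): waveform inputs come from `audio`, log-mel spectrogram targets
# from `audio_target`; the random waveform stands in for real 16 kHz mono audio.
import numpy as np

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 second of fake audio

inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000): padded raw waveforms
print(targets["input_values"].shape)  # (1, num_frames, 80): log-mel frames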
| 9 | 0 |
"""Project Euler problem 20: sum of the digits of 100!."""


def factorial(num: int) -> int:
    """Return the factorial of num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    result = split_and_add(factorial(num))
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
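# A small worked check of the helpers above: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.
assert factorial(10) == 3628800
assert split_and_add(3628800) == 27
assert solution(10) == 27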
| 310 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_config = config.model.params.first_stage_config.params
    unet_config = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_config).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_config).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
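# Example invocation (a sketch; the script filename and paths are placeholders):
#
#   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-converted
#
# The converted folder can then be reloaded with the standard diffusers API:
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("./ldm-converted")
#   image = pipe(num_inference_steps=50).images[0]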
| 9 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 624 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
# unet(128)
value_function()
| 9 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 295 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 9 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a__ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
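# A minimal usage sketch via the high-level pipeline factory. The CLIP checkpoint
# and the COCO image URL are illustrative choices, not requirements of this class.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog", "car"],
)
print(preds)  # e.g. [{'score': 0.98, 'label': 'cat'}, ...] sorted by score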
| 601 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression, truncating division toward zero."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
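    # Worked examples: "2 1 + 3 *" is (2 + 1) * 3 = 9; division truncates toward
    # zero, so "-7 2 /" evaluates to -3 rather than Python's floor of -4.
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix(["-7", "2", "/"]) == -3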
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 126 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
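    # Worked example: a 90-degree counterclockwise rotation of a 2x2 matrix.
    # [[1, 2],      [[2, 4],
    #  [3, 4]]  ->   [1, 3]]
    assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
    assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]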
| 9 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 102 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
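    # Worked example: 49/98 is a digit-cancelling fraction since "cancelling"
    # the shared 9s gives 4/8, which really equals 49/98. With two-digit
    # numerators there are exactly four non-trivial examples.
    assert is_digit_cancelling(49, 98)
    assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]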
| 9 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
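# A minimal sketch of driving this scheduler, mirroring the Karras-VE sampling
# loop (the second-order `step_correct` pass is omitted for brevity). `denoise`
# is a hypothetical stand-in for a real model apply call.
def sample_karras_ve(denoise, num_inference_steps=50, shape=(1, 3, 64, 64)):
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps)

    key = random.PRNGKey(0)
    sample = random.normal(key, shape) * scheduler.config.sigma_max  # start from pure noise

    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
        # increase noise, predict, then take one Euler step down the sigma schedule
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        model_output = denoise(sample_hat, sigma_hat)
        sample, _, state = scheduler.step(
            state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False
        )
    return sample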
| 163 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 9 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # When `offset` is set, pixel values are first shifted so the rescaled
        # output is centered around zero.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
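# A minimal usage sketch (illustrative): preprocess a fake 8-frame video with the
# default settings (resize shortest edge to 256, then center crop to 224x224).
import numpy as np

processor = VivitImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)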
| 129 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 9 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename flax weight names to PyTorch weight names and reshape tensors if necessary."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 208 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stub so the module imports when vision isn't available."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
| 9 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
def rename_key( key ):
    """simple docstring"""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
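# Illustrative example (not part of the original file): the regex above matches
# segments like "blocks.0", so rename_key("blocks.0.attn.weight") returns
# "blocks_0.attn.weight".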
def UpperCAmelCase ( a_ , a_ , a_ ) -> Union[str, Any]:
"""simple docstring"""
__A = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__A = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__A = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__A = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__A = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
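        # PyTorch stores conv kernels as (out_ch, in_ch, h, w); Flax expects
        # (h, w, in_ch, out_ch), hence the (2, 3, 1, 0) transpose below.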
__A = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__A = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
__A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__A = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__A = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase ( a_ , a_ , a_=4_2 ) -> Optional[int]:
"""simple docstring"""
__A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__A = flax_model.init_weights(PRNGKey(__UpperCamelCase ) )
__A = flatten_dict(__UpperCamelCase )
__A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__A = rename_key(__UpperCamelCase )
__A = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
__A , __A = rename_key_and_reshape_tensor(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
__A = jnp.asarray(__UpperCamelCase )
return unflatten_dict(__UpperCamelCase )
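# Hypothetical usage sketch (the converter and model names here are assumptions, not
# taken from this file):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
# The returned nested dict can then serve as the Flax model's `params`.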
| 55 |
def _modexpt( base , exponent , modulo_value ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base = 1_777 , height = 1_855 , digits = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
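# Worked example (illustrative): exponentiation by squaring halves the exponent on
# every even step, so _modexpt(2, 10, 1000) == pow(2, 10, 1000) == 24.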
if __name__ == "__main__":
print(f'{solution() = }')
| 9 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ (UpperCAmelCase_ , unittest.TestCase ):
lowercase_ : Union[str, Any] = GPTSwaTokenizer
lowercase_ : Dict = False
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
def A__ ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = GPTSwaTokenizer(_snake_case , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self : Any , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = '''This is a test'''
lowerCAmelCase__ = '''This is a test'''
return input_text, output_text
def A__ ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def A__ ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_snake_case ) , 20_00 )
def A__ ( self : Any ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def A__ ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = GPTSwaTokenizer(_snake_case )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
_snake_case , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(_snake_case )
# fmt: off
self.assertListEqual(
_snake_case , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def A__ ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = GPTSwaTokenizer(_snake_case )
lowerCAmelCase__ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
lowerCAmelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_snake_case , _snake_case ):
self.assertListEqual(tokenizer.encode_fast(_snake_case ) , _snake_case )
# Test that decode_fast returns the input text
for text, token_ids in zip(_snake_case , _snake_case ):
self.assertEqual(tokenizer.decode_fast(_snake_case ) , _snake_case )
@slow
def A__ ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=_snake_case , )
| 615 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Dict:
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
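# Illustrative sketch (not part of the original file): with is_semantic False and
# i == 0, the first pair above is
#   ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight").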
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
A__ = 'backbone.' if is_semantic else ''
# queries, keys and values
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A__ = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A__ = gamma_a
A__ = gamma_a
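# Note: the fused qkv weight popped above has shape (3 * hidden, hidden); the slices
# taken are rows [0:hidden] for query, [hidden:2*hidden] for key and the last `hidden`
# rows for value. Only q and v biases are popped because key has no bias in the
# original checkpoint.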
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( ) -> Dict:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(__UpperCamelCase )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
def make_matrix( row_size = 4 ):
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
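# Illustrative example (not part of the original file):
#   make_matrix(2) -> [[1, 2], [3, 4]]; a row_size of 0 falls back to 4 via `abs(...) or 4`.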
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = matrix[::-1]
return matrix
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = [x[::-1] for x in matrix]
return matrix
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__lowerCamelCase : int = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
__lowerCamelCase : Tuple = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
__lowerCamelCase : Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 310 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path( graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
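# Note: because the queue grows in path-length order, the first path that reaches
# `goal` uses the fewest edges, so a shortest path is returned.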
def bfs_shortest_path_distance( graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 9 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video( url ) -> bytes:
    """simple docstring"""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 624 |
def interpolation_search( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
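# Worked example (illustrative): searching 50 in [10, 30, 40, 45, 50, 66, 77, 93]
# first probes index 0 + (50 - 10) * 7 // (93 - 10) == 3 (value 45), then narrows
# to the right and finds 50 at index 4.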
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ):
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
if __name__ == "__main__":
import sys
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 6_7
    result = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
| 9 | 0 |
import argparse
from collections import defaultdict
import yaml
_UpperCAmelCase : Tuple = """docs/source/en/_toctree.yml"""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Any = defaultdict(__UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCamelCase__ : Any = [key for key, value in counts.items() if value > 1]
lowerCamelCase__ : Optional[Any] = []
for duplicate_key in duplicates:
lowerCamelCase__ : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(__UpperCamelCase , key=lambda s : s["title"].lower() )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=False ) -> str:
with open(__UpperCamelCase , encoding='utf-8' ) as f:
lowerCamelCase__ : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
lowerCamelCase__ : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCamelCase__ : int = content[api_idx]['sections']
# Then to the model doc
lowerCamelCase__ : Dict = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCamelCase__ : int = api_doc[model_idx]['sections']
lowerCamelCase__ : List[str] = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if 'sections' in section]
lowerCamelCase__ : int = False
for idx, modality_doc in modalities_docs:
lowerCamelCase__ : Tuple = modality_doc['sections']
lowerCamelCase__ : Optional[Any] = clean_model_doc_toc(__UpperCamelCase )
if old_modality_doc != new_modality_doc:
lowerCamelCase__ : str = True
if overwrite:
lowerCamelCase__ : Tuple = new_modality_doc
if diff:
if overwrite:
lowerCamelCase__ : str = model_doc
lowerCamelCase__ : Optional[Any] = api_doc
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_UpperCAmelCase : int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 295 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 9 | 0 |
'''simple docstring'''
def _lowercase ( input_str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 601 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 2_5, 5_0]
    abcb = [2_5, 5_0, 7_5]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
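    # trimf(X, [a, b, c]) builds a triangular membership function over X: it is 0 at
    # a and c and reaches its peak of 1 at b.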
# Compute the different operations using inbuilt functions.
    one = np.ones(7_5)
    zero = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 9 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_A: List[str] = logging.get_logger(__name__)
_A: Optional[Any] = {"""vocab_file""": """vocab.txt"""}
_A: List[Any] = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
_A: List[Any] = {
"""facebook/esm2_t6_8M_UR50D""": 1_024,
"""facebook/esm2_t12_35M_UR50D""": 1_024,
}
def load_vocab_file( vocab_file ) -> List[str]:
    with open(vocab_file , 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : List[str] = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_VOCAB_FILES_MAP
_A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="<unk>" , __A="<cls>" , __A="<pad>" , __A="<mask>" , __A="<eos>" , **__A , ):
super().__init__(**_snake_case )
__UpperCAmelCase = load_vocab_file(_snake_case )
__UpperCAmelCase = dict(enumerate(self.all_tokens ) )
__UpperCAmelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )}
__UpperCAmelCase = unk_token
__UpperCAmelCase = cls_token
__UpperCAmelCase = pad_token
__UpperCAmelCase = mask_token
__UpperCAmelCase = eos_token
__UpperCAmelCase = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __lowerCamelCase ( self , __A ):
return self._id_to_token.get(_snake_case , self.unk_token )
def __lowerCamelCase ( self , __A ):
return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , __A , **__A ):
return text.split()
def __lowerCamelCase ( self , __A=False ):
return len(self._id_to_token )
def __lowerCamelCase ( self ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowerCamelCase ( self , __A ):
return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , __A ):
return self._id_to_token.get(_snake_case , self.unk_token )
def __lowerCamelCase ( self , __A , __A = None ):
__UpperCAmelCase = [self.cls_token_id]
__UpperCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
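    # Layout sketch (illustrative): a single sequence becomes <cls> seq <eos>, and a
    # pair becomes <cls> seq_a <eos> seq_b <eos>; ESM reuses <eos> where BERT-style
    # models would use a dedicated <sep> token.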
def __lowerCamelCase ( self , __A , __A = None , __A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
__UpperCAmelCase = [1] + ([0] * len(_snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(_snake_case ) + [1]
return mask
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = os.path.join(_snake_case , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(_snake_case , 'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowerCamelCase ( self ):
return self.get_vocab_size(with_added_tokens=_snake_case )
def __lowerCamelCase ( self , __A , __A = False ):
return super()._add_tokens(_snake_case , special_tokens=_snake_case )
| 126 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _a ( self : int , _snake_case : int , _snake_case : List[str] ):
"""simple docstring"""
A__ = object_detector(examples[0] , threshold=0.0 )
A__ = len(_snake_case )
self.assertGreater(_snake_case , 0 )
self.assertEqual(
_snake_case , [
{
'score': ANY(_snake_case ),
'label': ANY(_snake_case ),
'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )},
}
for i in range(_snake_case )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@require_torch
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A__ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
A__ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
A__ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _a ( self : int ):
"""simple docstring"""
pass
@require_torch
@slow
def _a ( self : str ):
"""simple docstring"""
A__ = 0.2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = 2
A__ = pipeline('zero-shot-object-detection' )
A__ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 9 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
__magic_name__ : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
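# Note: the "*" in the mapped keys above is a per-layer placeholder; during weight
# loading it is replaced with the layer index parsed from the fairseq parameter name.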
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = {}
with open(__UpperCamelCase , """r""" ) as file:
for line_number, line in enumerate(__UpperCamelCase ):
UpperCamelCase : Optional[int] = line.strip()
if line:
UpperCamelCase : List[Any] = line.split()
UpperCamelCase : Union[str, Any] = line_number
UpperCamelCase : Optional[int] = words[0]
UpperCamelCase : List[Any] = value
return result
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for attribute in key.split(""".""" ):
UpperCamelCase : Union[str, Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__UpperCamelCase ):
UpperCamelCase : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCamelCase : int = """param"""
if weight_type is not None and weight_type != "param":
UpperCamelCase : Union[str, Any] = getattr(__UpperCamelCase , __UpperCamelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCamelCase : List[str] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
UpperCamelCase : Tuple = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase : Dict = shape_pointer.shape
# let's reduce dimension
UpperCamelCase : Tuple = value[0]
else:
UpperCamelCase : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCamelCase : Dict = value
elif weight_type == "weight_g":
UpperCamelCase : List[Any] = value
elif weight_type == "weight_v":
UpperCamelCase : List[Any] = value
elif weight_type == "bias":
UpperCamelCase : Tuple = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
UpperCamelCase : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase : str = value
else:
UpperCamelCase : Optional[int] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__UpperCamelCase ):
UpperCamelCase : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCamelCase : Tuple = """param"""
if weight_type is not None and weight_type != "param":
UpperCamelCase : List[str] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCamelCase : Optional[int] = """.""".join([key, hf_param_name] )
else:
UpperCamelCase : int = key
UpperCamelCase : Optional[Any] = value if """lm_head""" in full_key else value[0]
__magic_name__ : int = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ):
UpperCamelCase : Any = False
for key, mapped_key in MAPPING.items():
UpperCamelCase : Optional[int] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCamelCase : Dict = True
if "*" in mapped_key:
UpperCamelCase : List[Any] = name.split(__UpperCamelCase )[0].split(""".""" )[-2]
UpperCamelCase : str = mapped_key.replace("""*""" , __UpperCamelCase )
if "weight_g" in name:
UpperCamelCase : List[str] = """weight_g"""
elif "weight_v" in name:
UpperCamelCase : Any = """weight_v"""
elif "bias" in name:
UpperCamelCase : Any = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase : List[Any] = """weight"""
else:
UpperCamelCase : Union[str, Any] = None
if hf_dict is not None:
rename_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return is_used
return is_used
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = []
UpperCamelCase : Optional[int] = fairseq_model.state_dict()
UpperCamelCase : Tuple = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase : str = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCamelCase : List[str] = True
else:
UpperCamelCase : List[str] = load_wavaveca_layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
UpperCamelCase : Optional[Any] = name.split(""".""" )
UpperCamelCase : List[Any] = int(items[0] )
UpperCamelCase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCamelCase : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCamelCase : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCamelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCamelCase : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False ):
if config_path is not None:
UpperCamelCase : str = WavaVecaConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase : str = WavaVecaConfig()
if is_seq_class:
UpperCamelCase : List[str] = read_txt_into_dict(__UpperCamelCase )
UpperCamelCase : Optional[int] = idalabel
UpperCamelCase : List[Any] = WavaVecaForSequenceClassification(__UpperCamelCase )
UpperCamelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
feature_extractor.save_pretrained(__UpperCamelCase )
elif is_finetuned:
if dict_path:
UpperCamelCase : int = Dictionary.load(__UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase : Union[str, Any] = target_dict.pad_index
UpperCamelCase : Tuple = target_dict.bos_index
UpperCamelCase : Optional[Any] = target_dict.eos_index
UpperCamelCase : Optional[int] = len(target_dict.symbols )
UpperCamelCase : Tuple = os.path.join(__UpperCamelCase , """vocab.json""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCamelCase ) )
return
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
UpperCamelCase : str = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Optional[int] = 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase : Tuple = WavaVecaCTCTokenizer(
__UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCamelCase , )
UpperCamelCase : Dict = True if config.feat_extract_norm == """layer""" else False
UpperCamelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
UpperCamelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
UpperCamelCase : Optional[Any] = WavaVecaForCTC(__UpperCamelCase )
else:
UpperCamelCase : List[str] = WavaVecaForPreTraining(__UpperCamelCase )
if is_finetuned or is_seq_class:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCamelCase : Union[str, Any] = argparse.Namespace(task="""audio_pretraining""" )
UpperCamelCase : List[Any] = fairseq.tasks.setup_task(__UpperCamelCase )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__UpperCamelCase )
UpperCamelCase : Tuple = model[0].eval()
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__magic_name__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
__magic_name__ : Union[str, Any] = parser.parse_args()
__magic_name__ : int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 102 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE__ = NewType('''DataClass''', Any)
SCREAMING_SNAKE_CASE__ = NewType('''DataClassType''', Any)
def string_to_bool( v ) -> bool:
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
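# Illustrative behavior: string_to_bool("YES") -> True, string_to_bool("0") -> False,
# and string_to_bool("maybe") raises ArgumentTypeError.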
def make_choice_type_function( choices ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser(ArgumentParser ):
    """simple docstring"""

    dataclass_types : Iterable[DataClassType]

    def __init__( self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser : ArgumentParser , field : dataclasses.Field ):
        """simple docstring"""
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F'''--no_{field.name}''' , action='store_false' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype : DataClassType ):
        """simple docstring"""
        if hasattr(dtype , '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict( self , args : Dict[str, Any] , allow_extra_keys : bool = False ):
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file( self , json_file : str , allow_extra_keys : bool = False ):
        """simple docstring"""
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file : str , allow_extra_keys : bool = False ):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
| 9 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a , model_b , did_step , iteration ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model , input , target , accelerator , do_backward=True ):
    """simple docstring"""
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
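# Small self-contained sketch of the loss scaling performed in step_model: dividing
# each micro-batch loss by the number of accumulation steps makes the accumulated
# gradient match a single full-batch step (illustrative; not part of the test flow):
def _example_loss_scaling_equivalence():
    torch.manual_seed(0 )
    x , y = torch.randn(4 , 3 ) , torch.randn(4 )
    w_full = torch.zeros(3 , requires_grad=True )
    F.mse_loss(x @ w_full , y ).backward()
    w_acc = torch.zeros(3 , requires_grad=True )
    for chunk_x , chunk_y in zip(x.chunk(2 ) , y.chunk(2 ) ):
        # two equal micro-batches, each loss scaled by 1/2
        (F.mse_loss(chunk_x @ w_acc , chunk_y ) / 2 ).backward()
    assert torch.allclose(w_full.grad , w_acc.grad )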
def get_training_setup(accelerator , sched=False ):
    """simple docstring"""
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = get_training_setup(__UpperCamelCase )
# Use a single batch
__snake_case , __snake_case = next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
def test_distributed_sync(accelerator ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = get_training_setup(__UpperCamelCase )
# Use a single batch
__snake_case , __snake_case = next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
def test_gradient_accumulation(split_batches=False , dispatch_batches=False ):
"""simple docstring"""
__snake_case = Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case = get_training_setup(__UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__UpperCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False , dispatch_batches=False ):
"""simple docstring"""
__snake_case = Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = get_training_setup(__UpperCamelCase , __UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__UpperCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
__snake_case = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__UpperCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def test_dataloader_break():
"""simple docstring"""
__snake_case = Accelerator()
__snake_case = RegressionDataset(length=80 )
__snake_case = DataLoader(__UpperCamelCase , batch_size=16 )
__snake_case = RegressionDataset(length=96 )
__snake_case = DataLoader(__UpperCamelCase , batch_size=16 )
__snake_case , __snake_case = accelerator.prepare(__UpperCamelCase , __UpperCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if iteration < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if batch_num < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
"""simple docstring"""
__snake_case = Accelerator()
__snake_case = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__UpperCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__UpperCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__UpperCamelCase , __UpperCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__UpperCamelCase , __UpperCamelCase )
def _mp_fn(index ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
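# These checks are meant to be run end to end through the accelerate launcher, e.g.
# (illustrative command; the script path and process count depend on your setup):
#
#   accelerate launch --num_processes 2 test_sync.py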
| 163 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file ):
    print('Loading config file...' )

    def flatten_yaml_as_dict(d , parent_key='' , sep='.' ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(orig_cfg_file , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(orig_cfg_file , str(exc ) ) )
    return config
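# For reference, flatten_yaml_as_dict turns nested YAML such as
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
# into dotted keys:
#   {"model.classification.name": "mobilevit_v2"}
# which is why the getattr() lookups below address the config with dotted names.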
def get_mobilevitva_config(task_name , orig_cfg_file ):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1_000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21_000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d are not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
    # id2label
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
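# Illustrative trace of a single checkpoint key through create_rename_keys; this
# helper is a sketch and is not invoked by the conversion flow:
def _example_create_rename_keys():
    mapping = dict(create_rename_keys({'layer_3.1.local_rep.0.conv.weight': None} ) )
    assert mapping['layer_3.1.local_rep.0.conv.weight'] == (
        'mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight'
    )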
def remove_unused_keys(state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of the original state_dict
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
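# Example invocation (illustrative only; the script name and the checkpoint/config
# paths are hypothetical placeholders):
#
#   python convert_mobilevitv2_checkpoint.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-converted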
| 9 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h , w , scale_factor=8 ):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
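# Tiny worked example of the rounding above (illustrative; never called here):
def _example_get_new_h_w():
    # 765 is not a multiple of 8**2 = 64, so it is rounded up to 12 * 64 before
    # dividing by the scale factor; both sides come out as 12 * 8 = 96.
    assert get_new_h_w(765 , 768 , scale_factor=8 ) == (96, 96)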
class KandinskyPipeline(DiffusionPipeline ):
    def __init__( self , text_encoder : MultilingualCLIP , tokenizer : XLMRobertaTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , movq : VQModel , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
lowerCamelCase__ = len(_snake_case ) if isinstance(_snake_case , _snake_case ) else 1
# get prompt text embeddings
lowerCamelCase__ = self.tokenizer(
_snake_case , padding="""max_length""" , truncation=_snake_case , max_length=77 , return_attention_mask=_snake_case , add_special_tokens=_snake_case , return_tensors="""pt""" , )
lowerCamelCase__ = text_inputs.input_ids
lowerCamelCase__ = self.tokenizer(_snake_case , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_snake_case , _snake_case ):
lowerCamelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCamelCase__ = text_input_ids.to(_snake_case )
lowerCamelCase__ = text_inputs.attention_mask.to(_snake_case )
lowerCamelCase__ , lowerCamelCase__ = self.text_encoder(
input_ids=_snake_case , attention_mask=_snake_case )
lowerCamelCase__ = prompt_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCamelCase__ = text_encoder_hidden_states.repeat_interleave(_snake_case , dim=0 )
lowerCamelCase__ = text_mask.repeat_interleave(_snake_case , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = 42
if negative_prompt is None:
lowerCamelCase__ = [""""""] * batch_size
elif type(_snake_case ) is not type(_snake_case ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case )} !="""
f""" {type(_snake_case )}.""" )
elif isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = [negative_prompt]
elif batch_size != len(_snake_case ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = self.tokenizer(
_snake_case , padding="""max_length""" , max_length=77 , truncation=_snake_case , return_attention_mask=_snake_case , add_special_tokens=_snake_case , return_tensors="""pt""" , )
lowerCamelCase__ = uncond_input.input_ids.to(_snake_case )
lowerCamelCase__ = uncond_input.attention_mask.to(_snake_case )
lowerCamelCase__ , lowerCamelCase__ = self.text_encoder(
input_ids=_snake_case , attention_mask=_snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ = negative_prompt_embeds.shape[1]
lowerCamelCase__ = negative_prompt_embeds.repeat(1 , _snake_case )
lowerCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _snake_case )
lowerCamelCase__ = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase__ = uncond_text_encoder_hidden_states.repeat(1 , _snake_case , 1 )
lowerCamelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _snake_case , -1 )
lowerCamelCase__ = uncond_text_mask.repeat_interleave(_snake_case , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , prompt : Union[str, List[str]] , image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_prompt : Optional[Union[str, List[str]]] = None , height : int = 512 , width : int = 512 , num_inference_steps : int = 100 , guidance_scale : float = 4.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = 1
elif isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = len(_snake_case )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_snake_case )}""" )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._encode_prompt(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ = torch.cat(_snake_case , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCamelCase__ = negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.unet.config.in_channels
lowerCamelCase__ , lowerCamelCase__ = get_new_h_w(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCamelCase__ = self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ , lowerCamelCase__ = variance_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , ).prev_sample
# post-processing
lowerCamelCase__ = self.movq.decode(_snake_case , force_not_quantize=_snake_case )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase__ = image * 0.5 + 0.5
lowerCamelCase__ = image.clamp(0 , 1 )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 129 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_model_doc_toc(model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
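# Minimal usage sketch for clean_model_doc_toc; the toy table of contents below is
# made up for illustration and the function is never called by this module:
def _example_clean_model_doc_toc():
    toc = [
        {'local': 'model_doc/bert', 'title': 'BERT'},
        {'local': 'model_doc/albert', 'title': 'ALBERT'},
        {'local': 'model_doc/bert', 'title': 'BERT'},
    ]
    # duplicates with identical titles collapse to one entry; the result is sorted by title
    assert clean_model_doc_toc(toc ) == [
        {'local': 'model_doc/albert', 'title': 'ALBERT'},
        {'local': 'model_doc/bert', 'title': 'BERT'},
    ]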
def check_model_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 9 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def _UpperCAmelCase ( self , __lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_ : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def _UpperCAmelCase ( self ):
self.enable_attention_slicing(_snake_case )
@torch.no_grad()
    def __call__( self , prompt , height = 5_12 , width = 5_12 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):
if isinstance(_snake_case , _snake_case ):
UpperCamelCase_ : Tuple = 1
elif isinstance(_snake_case , _snake_case ):
UpperCamelCase_ : Optional[int] = len(_snake_case )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_snake_case , _snake_case ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_snake_case )}." )
# get prompt text embeddings
UpperCamelCase_ : Optional[Any] = self.tokenizer(
_snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase_ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Dict = text_embeddings.shape
UpperCamelCase_ : List[Any] = text_embeddings.repeat(1 , _snake_case , 1 )
UpperCamelCase_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , _snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ : str = 42
if negative_prompt is None:
UpperCamelCase_ : Union[str, Any] = [""""""]
elif type(_snake_case ) is not type(_snake_case ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case )} !="
F" {type(_snake_case )}." )
elif isinstance(_snake_case , _snake_case ):
UpperCamelCase_ : int = [negative_prompt]
elif batch_size != len(_snake_case ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""" )
else:
UpperCamelCase_ : Dict = negative_prompt
UpperCamelCase_ : Any = text_input_ids.shape[-1]
UpperCamelCase_ : List[str] = self.tokenizer(
_snake_case , padding="""max_length""" , max_length=_snake_case , truncation=_snake_case , return_tensors="""pt""" , )
UpperCamelCase_ : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ : Union[str, Any] = uncond_embeddings.shape[1]
UpperCamelCase_ : List[Any] = uncond_embeddings.repeat(_snake_case , _snake_case , 1 )
UpperCamelCase_ : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , _snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase_ : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
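# Hedged sketch (editor's addition, not part of the original pipeline): the
# center-crop trick above can be exercised on dummy tensors. `align_latents`
# is a hypothetical helper name; it copies the overlapping center region of a
# reference latent into a latent of a different spatial size, which is what
# keeps same-seed generations at different resolutions looking similar.
import torch


def align_latents(latents: torch.Tensor, latents_reference: torch.Tensor) -> torch.Tensor:
    dx = (latents.shape[3] - latents_reference.shape[3]) // 2
    dy = (latents.shape[2] - latents_reference.shape[2]) // 2
    w = latents_reference.shape[3] if dx >= 0 else latents_reference.shape[3] + 2 * dx
    h = latents_reference.shape[2] if dy >= 0 else latents_reference.shape[2] + 2 * dy
    tx, ty = max(dx, 0), max(dy, 0)
    dx, dy = max(-dx, 0), max(-dy, 0)
    latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
    return latents


if __name__ == "__main__":
    g = torch.Generator().manual_seed(0)
    ref = torch.randn(1, 4, 64, 64, generator=g)
    new = torch.randn(1, 4, 96, 96, generator=g)
    out = align_latents(new, ref)
    assert torch.equal(out[:, :, 16:80, 16:80], ref)  # the 64x64 center matches the reference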
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_check_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
        use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
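# Hedged usage sketch (editor's addition): a minimal end-to-end inference run
# with the checkpoint exercised by the slow tests above. Requires network
# access to download the model; the image path is a placeholder.
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor


def run_segformer(image_path: str):
    image = Image.open(image_path)
    processor = SegformerImageProcessor()
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits  # shape (1, num_labels, height / 4, width / 4)
    return logits.argmax(dim=1)  # per-pixel class ids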
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
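# Hedged usage sketch (editor's addition, not part of the original script):
# a typical command-line invocation; the checkpoint identifiers below are
# illustrative, not prescribed by the script, and the file name is assumed.
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated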
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
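# Hedged usage sketch (editor's addition): one way to wire the pieces above
# into a Trainer. `output_dir` and `patience` are illustrative values, and
# `build_trainer` is a hypothetical helper, not part of the original module.
import pytorch_lightning as pl  # noqa: F811 (already imported above)


def build_trainer(output_dir: str, metric: str = "rouge2") -> pl.Trainer:
    return pl.Trainer(
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience=3),
        ]
    )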
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k: Harris free parameter; the usual choices are 0.04 and 0.06."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # sum the gradient products over the local window
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
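# Hedged sketch (editor's addition): OpenCV ships a vectorized Harris detector,
# cv2.cornerHarris, which is a useful cross-check for the loop-based class
# above. The blockSize/ksize values here are illustrative.
import cv2
import numpy as np


def opencv_harris(img_path: str, k: float = 0.04) -> np.ndarray:
    gray = cv2.imread(img_path, 0).astype(np.float32)
    # same det(M) - k * trace(M)^2 response map, computed with Sobel derivatives
    return cv2.cornerHarris(gray, blockSize=3, ksize=3, k=k)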
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):

    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0,
        do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64,
        win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80,
        fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2,
        return_attention_mask: bool = True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin,
            max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            padded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            padded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            padded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
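# Hedged usage sketch (editor's addition): featurizing one second of dummy
# 16 kHz audio with the extractor above, for both the input path (raw
# waveforms) and the target path (log-mel labels). Shapes are indicative.
import numpy as np

if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    audio = np.zeros(16000, dtype=np.float32)
    inputs = extractor(audio=audio, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)  # (1, 16000) raw samples
    targets = extractor(audio_target=audio, sampling_rate=16000, return_tensors="np")
    print(targets["input_values"].shape)  # (1, num_frames, 80) log-mel features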
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
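# Hedged usage sketch (editor's addition): loading the converted pipeline and
# sampling from it. The path must match --output_path above; `sample` is a
# hypothetical helper name, and the step count is illustrative.
from diffusers import LDMPipeline


def sample(output_path: str) -> None:
    pipe = LDMPipeline.from_pretrained(output_path)
    image = pipe(num_inference_steps=50).images[0]
    image.save("ldm_sample.png")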
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
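# Hedged usage sketch (editor's addition): how this trainer is typically
# constructed in a question-answering script. All names on the right-hand
# side (model, training_args, datasets, post-processing and metric functions)
# are user-supplied and not defined in this module, so the sketch is kept as
# comments.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=30, num_beams=4)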
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
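# Hedged sketch (editor's addition): verify that a converted checkpoint
# round-trips through from_pretrained. `check_roundtrip` is a hypothetical
# helper; the path matches the save location used by unet(32) above.
def check_roundtrip(path: str = "hub/hopper-medium-v2/unet/hor32") -> None:
    reloaded = UNet1DModel.from_pretrained(path)
    print(sum(p.numel() for p in reloaded.parameters()), "parameters")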
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
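# Hedged usage sketch (editor's addition): the shim above only emits a
# deprecation warning and defers to DeiTImageProcessor; new code should
# instantiate DeiTImageProcessor directly.
import warnings

if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = DeiTFeatureExtractor()
    assert any("deprecated" in str(w.message) for w in caught)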
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True,
        vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512,
        initializer_range=0.02, bos_token_id=0, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
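# Hedged usage sketch (editor's addition): running the text encoder on a tiny
# random batch, mirroring what create_and_check_model does above. The small
# config values here match the tester defaults and are illustrative.
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

if __name__ == "__main__":
    config = BlipTextConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = TFBlipTextModel(config)
    input_ids = tf.random.uniform((2, 7), minval=0, maxval=99, dtype=tf.int32)
    outputs = model(input_ids, training=False)
    print(outputs.last_hidden_state.shape)  # (2, 7, 32)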
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return the given decimal number as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with Euclid's algorithm.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate an expression given in reverse Polish (postfix) notation.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero, matching C-style "/"
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
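    # Hedged usage sketch (editor's addition): evaluating (2 + 1) * 3 given in
    # reverse Polish notation.
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # 9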