| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
---|---|---|---|---|
def hexagonal_numbers(length: int) -> list[int]:
    """Generate the first `length` hexagonal numbers, h_n = n * (2n - 1)."""
    # check the type before comparing, so non-int input raises ValueError, not TypeError
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
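# Worked example (added for illustration): each entry is n * (2n - 1), so
#   hexagonal_numbers(length=5)  -> [0, 1, 6, 15, 28]
#   hexagonal_numbers(length=0)  -> ValueError: Length must be a positive integer.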
| 21 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
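# Minimal usage sketch (an assumption based on the API exercised above, not part
# of the original test file): extracting a local archive through the cache.
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#
#   download_config = DownloadConfig(cache_dir="./cache", extract_compressed_file=True)
#   extracted_file = cached_path("data.txt.gz", download_config=download_config)  # "data.txt.gz" is a placeholder path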
| 344 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
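# The class above is a deprecation shim: it subclasses the replacement, warns once
# at construction, then behaves identically. A generic sketch of the same pattern
# (illustrative only; OldName/NewName are made-up stand-ins):
#
#   class OldName(NewName):
#       def __init__(self, *args, **kwargs):
#           warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
#           super().__init__(*args, **kwargs)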
| 22 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
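# Illustrative aside (not from the original file): the same lazy-import idea can
# be expressed with PEP 562 module-level __getattr__; transformers uses its own
# _LazyModule helper instead, which also carries the import structure above.
#
#   import importlib
#
#   _LAZY_ATTRS = {"LayoutXLMProcessor": ".processing_layoutxlm"}
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")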
| 344 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """RemBERT tokenizer backed by SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor is not picklable
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
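# Usage sketch (an assumption based on the pretrained map above, not part of the
# original module):
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   pieces = tokenizer._tokenize("Hello world")
#   ids = tokenizer.convert_tokens_to_ids(pieces)
#   # build_inputs_with_special_tokens wraps a single sequence as [CLS] ... [SEP]
#   ids_with_special = tokenizer.build_inputs_with_special_tokens(ids)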
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
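# Illustrative aside (not in the original): attribute_map means the generic
# PretrainedConfig names resolve to DistilBERT's own field names.
#
#   cfg = DistilBertConfig()
#   assert cfg.hidden_size == cfg.dim == 768
#   assert cfg.num_hidden_layers == cfg.n_layers == 6
#   assert cfg.num_attention_heads == cfg.n_heads == 12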
| 344 | 0 |
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    return to_simple_case(text)
def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")
def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
    __import__('doctest').testmod()
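# Worked examples (traced from the functions above, added for illustration):
#
#   to_pascal_case("hello world")        -> "HelloWorld"
#   to_camel_case("hello world")         -> "helloWorld"
#   to_snake_case("hello world", False)  -> "hello_world"
#   to_snake_case("hello world", True)   -> "HELLO_WORLD"
#   to_kebab_case("hello world", False)  -> "hello-world"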
| 24 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
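# Example invocation (illustrative; the script filename is an assumption, the
# flags come from the argparse definitions above):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50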
| 344 | 0 |
"""simple docstring"""
import requests
UpperCAmelCase__ : Any = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def lowercase_ ( _snake_case ):
# fetching a list of articles in json format
SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] ,1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 25 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        pass
    def test_add_noise_device(self):
        pass
| 344 | 0 |
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
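# Worked example (added for illustration; arithmetic checked by hand): an initial
# outlay of 1000 followed by three inflows of 500 at a 10% discount rate:
#
#   net_present_value(0.1, [-1000, 500, 500, 500])
#   = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3
#   = -1000 + 454.55 + 413.22 + 375.66
#   -> 243.43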
| 26 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
    from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 344 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 27 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCamelCase = getattr(A__ , A__ )
if weight_type is not None:
UpperCamelCase = getattr(A__ , A__ ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "weight_ih_l0":
UpperCamelCase = value
elif weight_type == "weight_hh_l0":
UpperCamelCase = value
elif weight_type == "bias_ih_l0":
UpperCamelCase = value
elif weight_type == "bias_hh_l0":
UpperCamelCase = value
elif weight_type == "weight_ih_l1":
UpperCamelCase = value
elif weight_type == "weight_hh_l1":
UpperCamelCase = value
elif weight_type == "bias_ih_l1":
UpperCamelCase = value
elif weight_type == "bias_hh_l1":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def __lowerCamelCase ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCamelCase , UpperCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
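# should_ignore() understands three ignore patterns: a key ending in ".*" matches any
# name that starts with everything before the "*" (e.g. "encoder.model.*" drops the
# whole encoder), a key containing ".*." matches when both the prefix and the suffix
# occur in the name, and any other key matches by plain substring.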
def __lowerCamelCase ( A__ , A__ , A__ ) -> int:
"""simple docstring"""
UpperCamelCase = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
UpperCamelCase = MAPPING_24K
elif model_name == "encodec_48khz":
UpperCamelCase = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(A__ , A__ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCamelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
UpperCamelCase , UpperCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
UpperCamelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(A__ )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , A__ )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "weight_ih_l0" in name:
UpperCamelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
UpperCamelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
UpperCamelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
UpperCamelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
UpperCamelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
UpperCamelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
UpperCamelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
UpperCamelCase = 'bias_hh_l1'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ , A__=None , A__=None , ) -> Optional[int]:
"""simple docstring"""
if config_path is not None:
UpperCamelCase = EncodecConfig.from_pretrained(A__ )
else:
UpperCamelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
UpperCamelCase = [8, 5, 4, 4]
UpperCamelCase = [2.2]
UpperCamelCase = 64
UpperCamelCase = 32_000
UpperCamelCase = 2_048
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
elif model_name == "encodec_48khz":
UpperCamelCase = [8, 5, 4, 2]
UpperCamelCase = [3.0, 6.0, 12.0, 24.0]
UpperCamelCase = 48_000
UpperCamelCase = 2
UpperCamelCase = False
UpperCamelCase = 'time_group_norm'
UpperCamelCase = True
UpperCamelCase = 1.0
UpperCamelCase = 0.01
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCamelCase = EncodecModel(A__ )
UpperCamelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(A__ )
UpperCamelCase = torch.load(A__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCamelCase = original_checkpoint['best_state']
recursively_load_weights(A__ , A__ , A__ )
model.save_pretrained(A__ )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(A__ )
model.push_to_hub(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowerCamelCase : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
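# Example invocation (hypothetical script name and paths; --model must be one of
# encodec_24khz, encodec_32khz or encodec_48khz):
#   python convert_encodec_checkpoint.py --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th --pytorch_dump_folder_path ./encodec-24khz-hf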
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
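# _LazyModule defers the imports declared in _import_structure until an attribute is
# first accessed, so importing this package stays cheap and the optional vision/torch
# dependencies are only pulled in when the corresponding classes are actually used.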
| 344 | 0 |
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
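# Example: cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]. The backward pass
# bubbles the smallest remaining element to the left, the forward pass bubbles the
# largest to the right, and the `swapped` flag exits early once a pass makes no change.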
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(a_ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(a_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
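# Sketch of the intended use (hypothetical menu; build_menu/greedy are the conventional
# names for the two helpers above, and the key function is one of the Things getters):
#   foods = build_menu(["burger", "fries", "salad"], [80, 30, 20], [40, 20, 10])
#   taken, total_value = greedy(foods, 50.0, Things.get_value)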
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( a_ , a_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
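# Worked example: a 10 mH inductor with a 1 uF capacitor resonates at
# f = 1 / (2 * pi * sqrt(1e-2 * 1e-6)) ≈ 1591.5 Hz.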
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
'''simple docstring'''
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Optional[int] = list[list[int]]
# assigning initial values to the grid
__SCREAMING_SNAKE_CASE : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__SCREAMING_SNAKE_CASE : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def UpperCamelCase_ ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
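# The box check maps (row, column) to the top-left corner of its 3x3 box via
# row - row % 3 and column - column % 3; e.g. cell (5, 7) lives in the box anchored
# at (3, 6).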
def UpperCamelCase_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def UpperCamelCase_ ( _UpperCAmelCase : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(_UpperCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : int = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase : Dict = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
_UpperCAmelCase : Tuple = 0
return None
def UpperCamelCase_ ( _UpperCAmelCase : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
__SCREAMING_SNAKE_CASE : Tuple = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 31 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
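# Typical wiring inside a concrete model test (sketch; `MyConfig` and the kwargs stand
# in for a real config class and its constructor arguments, and run_common_tests is
# the conventional name of the final driver method above):
#   self.config_tester = ConfigTester(self, config_class=MyConfig, hidden_size=37)
#   self.config_tester.run_common_tests()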
| 344 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {'vocab_file': 'vocab.txt'}
UpperCAmelCase_ : Optional[int] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
UpperCAmelCase_ : Tuple = {
'openbmb/cpm-ant-10b': 1024,
}
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Tuple:
"""simple docstring"""
a_ : Union[str, Any] = collections.OrderedDict()
with open(__A , 'r' , encoding='utf-8' ) as reader:
a_ : int = reader.readlines()
for index, token in enumerate(__A ):
a_ : Union[str, Any] = token.rstrip('\n' )
a_ : Union[str, Any] = index
return vocab
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_0_0 ) -> List[str]:
a_ : List[Any] = vocab
a_ : Tuple = unk_token
a_ : Tuple = max_input_chars_per_word
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
a_ : Any = list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.max_input_chars_per_word:
return [self.unk_token]
a_ : Tuple = 0
a_ : Union[str, Any] = []
while start < len(SCREAMING_SNAKE_CASE__ ):
a_ : List[Any] = len(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = None
while start < end:
a_ : Dict = ''.join(chars[start:end] )
if substr in self.vocab:
a_ : int = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = end
return sub_tokens
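# Greedy longest-match-first: with a vocab of {"un", "happy"} the word "unhappy"
# tokenizes to ["un", "happy"]; a position with no vocab match emits the unk token and
# advances one character, and any word longer than max_input_chars_per_word collapses
# to a single unk token. (Illustrative vocab.)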
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[int] = VOCAB_FILES_NAMES
snake_case__ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : str = ['''input_ids''', '''attention_mask''']
snake_case__ : Union[str, Any] = False
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict="<d>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</d>" , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE__ : Tuple="<pad>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : str="</n>" , SCREAMING_SNAKE_CASE__ : Any="</_>" , SCREAMING_SNAKE_CASE__ : Tuple="left" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Union[str, Any]:
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=SCREAMING_SNAKE_CASE__ , eod_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , line_token=SCREAMING_SNAKE_CASE__ , space_token=SCREAMING_SNAKE_CASE__ , padding_side=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
a_ : Tuple = bod_token
a_ : str = eod_token
a_ : Optional[int] = load_vocab(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.encoder[space_token]
a_ : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a_ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE__ : x[1] ) )
a_ : List[Any] = {v: k for k, v in self.encoder.items()}
a_ : str = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.encoder[self.bod_token]
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
return self.encoder[self.eod_token]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
return self.encoder["\n"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
a_ : Union[str, Any] = []
for x in jieba.cut(SCREAMING_SNAKE_CASE__ , cut_all=SCREAMING_SNAKE_CASE__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
return output_tokens
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
a_ : Optional[Any] = [i for i in token_ids if i >= 0]
a_ : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
return token in self.encoder
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return "".join(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
a_ : str = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
a_ : str = (filename_prefix + '-' if filename_prefix else '') + save_directory
a_ : int = 0
if " " in self.encoder:
a_ : List[str] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
a_ : Union[str, Any] = self.encoder['\n']
del self.encoder["\n"]
a_ : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE__ : x[1] ) )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
a_ : Optional[Any] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : List[int] = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
| 32 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
A_ : str = jax.tree_util.tree_map(
lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
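            # Flax stores conv kernels as (H, W, C_in, C_out) while PyTorch expects
            # (C_out, C_in, H, W), hence the (3, 2, 0, 1) permutation.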
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
| 344 | 0 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None
def lowercase ( __snake_case : Union[str, Any] , __snake_case : List[str]=0.999 , __snake_case : str="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowercase_ : Optional[Any] = []
for i in range(__snake_case ):
lowercase_ : int = i / num_diffusion_timesteps
lowercase_ : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
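# betas_for_alpha_bar discretizes a continuous alpha-bar schedule: for step i it emits
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped to max_beta, so that the
# cumulative product of (1 - beta_i) tracks the chosen alpha_bar curve.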
class _UpperCAmelCase ( _A , _A ):
SCREAMING_SNAKE_CASE_ : Any = 1
@register_to_config
def __init__( self : Any , A : int = 10_00 , A : float = 0.0001 , A : float = 0.02 , A : str = "linear" , A : Optional[Union[np.ndarray, List[float]]] = None , A : bool = True , A : bool = True , A : int = 0 , A : str = "epsilon" , A : float = 1.0 , **A : Union[str, Any] , ) -> Any:
if kwargs.get('''set_alpha_to_one''' , A ) is not None:
lowercase_ : List[Any] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , A , standard_warn=A )
lowercase_ : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
lowercase_ : List[str] = torch.tensor(A , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase_ : Any = torch.linspace(A , A , A , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(A )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
lowercase_ : List[Any] = 1.0 - self.betas
lowercase_ : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowercase_ : Tuple = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowercase_ : Optional[int] = 1.0
# setable values
lowercase_ : Optional[int] = None
lowercase_ : Tuple = torch.from_numpy(np.arange(0 , A ).copy().astype(np.intaa ) )
def A ( self : Any , A : torch.FloatTensor , A : Optional[int] = None ) -> torch.FloatTensor:
return sample
def A ( self : int , A : int , A : Union[str, torch.device] = None ) -> Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
lowercase_ : Optional[int] = num_inference_steps
lowercase_ : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase_ : Dict = (np.arange(0 , A ) * step_ratio).round().copy().astype(np.intaa )
lowercase_ : str = torch.from_numpy(A ).to(A )
self.timesteps += self.config.steps_offset
def A ( self : Union[str, Any] , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : float = 0.0 , A : bool = False , A : Optional[torch.FloatTensor] = None , A : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
lowercase_ : Optional[int] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowercase_ : List[str] = self.alphas_cumprod[timestep]
lowercase_ : Tuple = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowercase_ : Dict = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowercase_ : List[Any] = model_output
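            # Inverts the forward process x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps:
            # x_0 = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t), with alpha_prod_t = a_t
            # and beta_prod_t = 1 - a_t (eq. 12 of the DDIM paper).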
elif self.config.prediction_type == "sample":
lowercase_ : Tuple = model_output
lowercase_ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowercase_ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowercase_ : Union[str, Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowercase_ : Union[str, Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase_ : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase_ : int = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=A , pred_original_sample=A )
def __len__( self : str ) -> Tuple:
return self.config.num_train_timesteps
| 33 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 344 | 0 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _a ( __a ):
def __init__( self : str , lowercase : Union[str, Any] , lowercase : Tuple=None , lowercase : Any=True , lowercase : Dict=None , **lowercase : int ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = config_class
UpperCAmelCase = has_text_modality
UpperCAmelCase = kwargs
UpperCAmelCase = common_properties
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict )
UpperCAmelCase = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(lowercase , lowercase ) , msg=f"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(lowercase ):
try:
setattr(lowercase , lowercase , lowercase )
self.parent.assertEqual(
getattr(lowercase , lowercase ) , lowercase , msg=f"`{name} value {idx} expected, but was {getattr(lowercase , lowercase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(lowercase ):
try:
UpperCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(lowercase , lowercase ) , lowercase , msg=f"`{name} value {idx} expected, but was {getattr(lowercase , lowercase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict )
UpperCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , lowercase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase = os.path.join(lowercase , '''config.json''' )
config_first.to_json_file(lowercase )
UpperCAmelCase = self.config_class.from_json_file(lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(lowercase )
UpperCAmelCase = self.config_class.from_pretrained(lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict )
UpperCAmelCase = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase = os.path.join(lowercase , lowercase )
config_first.save_pretrained(lowercase )
UpperCAmelCase = self.config_class.from_pretrained(lowercase , subfolder=lowercase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
UpperCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.config_class.is_composition:
return
UpperCAmelCase = self.config_class()
self.parent.assertIsNotNone(lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = copy.deepcopy(lowercase )
UpperCAmelCase = self.config_class(**lowercase )
UpperCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(lowercase , lowercase ) != value:
wrong_values.append((key, getattr(lowercase , lowercase ), value) )
if len(lowercase ) > 0:
UpperCAmelCase = '''\n'''.join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
def A ( self : Tuple ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 34 |
'''simple docstring'''
def UpperCAmelCase ( n = 1_0_0 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'{solution() = }')
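# Sanity check (well-known values): for n = 10 the difference is 3025 - 385 = 2640,
# and for the default n = 100 the result is 25164150.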
| 344 | 0 |
'''simple docstring'''
import numpy as np
def __snake_case( _lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def __snake_case( _lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(_lowerCAmelCase )
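# sigmoid(x) = 1 / (1 + e^(-x)); multiplying by x gives the SiLU / swish activation,
# which is 0 at x = 0 and approaches the identity for large positive x.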
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
_lowerCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
_lowerCAmelCase : Any = "xvjiarui/stable-diffusion-2-inpainting"
_lowerCAmelCase , _lowerCAmelCase : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(__a, safety_checker=__a)
_lowerCAmelCase : List[Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
_lowerCAmelCase : List[Any] = jax.random.PRNGKey(0)
_lowerCAmelCase : int = 50
_lowerCAmelCase : Optional[Any] = jax.device_count()
_lowerCAmelCase : Dict = num_samples * [prompt]
_lowerCAmelCase : Dict = num_samples * [init_image]
_lowerCAmelCase : Any = num_samples * [mask_image]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = pipeline.prepare_inputs(__a, __a, __a)
# shard inputs and rng
_lowerCAmelCase : Any = replicate(__a)
_lowerCAmelCase : List[Any] = jax.random.split(__a, jax.device_count())
_lowerCAmelCase : Optional[int] = shard(__a)
_lowerCAmelCase : List[str] = shard(__a)
_lowerCAmelCase : Optional[int] = shard(__a)
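        # replicate() copies the pipeline params onto every local device,
        # jax.random.split hands each device its own PRNG key, and shard() reshapes
        # each input to (num_devices, batch_per_device, ...) so the jitted call can
        # run one slice per device.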
_lowerCAmelCase : int = pipeline(
__a, __a, __a, __a, __a, __a, jit=__a)
_lowerCAmelCase : Dict = output.images.reshape(__a, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = images[0, 253:256, 253:256, -1]
_lowerCAmelCase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten()))
_lowerCAmelCase : Dict = jnp.array(
[0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 36 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct the dense atom14 <-> sparse atom37 index and mask tables for a protein."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device)
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
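# Hedged sketch (added): the core trick in make_atom14_masks above is a
# per-restype lookup table gathered with the aatype tensor -- indexing a
# (num_restypes, num_atoms) table with a (num_res,) index tensor broadcasts
# to (num_res, num_atoms). The toy shapes below are illustrative.
import torch

def _gather_demo():
    lookup = torch.tensor([[0, 1, 2], [3, 4, 5]])  # (num_restypes=2, num_atoms=3)
    aatype = torch.tensor([1, 0, 1])               # per-residue restype indices
    per_res = lookup[aatype]                       # -> shape (3, 3), one row per residue
    return per_res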
| 344 | 0 |
'''simple docstring'''
_lowerCAmelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
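# Hedged sketch (added): the optional-dependency guard used throughout this
# file, in miniature -- probe a backend once at import time and, if missing,
# bind a dummy stand-in that fails loudly only when actually used. Names are
# illustrative, not part of this file.
import importlib.util

if importlib.util.find_spec("torch") is not None:
    import torch
else:
    class _MissingTorch:
        def __getattr__(self, name):
            raise ImportError("torch is required for this feature")

    torch = _MissingTorch()  # attribute access now raises a clear error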
| 37 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : List[Any] = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
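# Hedged note (added, not part of the original test): the half-precision
# pattern exercised above is load with torch_dtype=torch.float16 plus
# device_map="auto", move the pixel values to the model's device, and run the
# forward pass under torch.no_grad().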
| 344 | 0 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
snake_case__ : Dict = ["""transformers""", """torch""", """note_seq"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ):
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _A ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _A ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
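# Hedged sketch (added): a minimal stand-in for the requires_backends helper
# used above -- check each backend is importable and raise a readable error
# otherwise. The real helper lives in the library's utils; this is illustrative.
import importlib.util

def _requires_backends_demo(obj, backends):
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")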
| 38 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]:
A_ : Optional[int] = parent
A_ : Union[str, Any] = do_resize
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
A_ : Tuple = size_divisor
A_ : List[Any] = do_rescale
A_ : Dict = rescale_factor
A_ : List[Any] = do_normalize
A_ : Dict = do_center_crop
A_ : Optional[Any] = image_mean
A_ : List[str] = image_std
A_ : str = do_pad
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = min_resolution
A_ : Union[str, Any] = max_resolution
def UpperCAmelCase_ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
if not batched:
A_ : Union[str, Any] = self.size["""shortest_edge"""]
A_ : Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
A_ , A_ : Optional[Any] = size, scale * w
else:
A_ , A_ : Dict = scale * h, size
A_ : Union[str, Any] = int((1333 / 800) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = newh * scale
A_ : Dict = neww * scale
A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 )
A_ , A_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
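# Hedged worked example (added): with size = {"shortest_edge": 288} and a
# 400x600 input, scale = 288 / 400 = 0.72, giving 288x432; max_size is
# int(1333 / 800 * 288) = 479, so no second rescale is needed; flooring both
# dims to multiples of size_divisor = 32 yields a final 288x416.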
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ort.SessionOptions()
_UpperCAmelCase = False
return options
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A red cat sitting on a park bench'
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
_UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A red cat sitting on a park bench'
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 39 |
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths on a v-vertex weighted graph."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are 0-based vertex indices and must be < v;
    # an out-of-range index will raise an IndexError
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
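# Hedged sketch (added): a non-interactive driver mirroring the transcript
# above (vertices 0..2, edge 1->2 of weight 2 and edge 2->1 of weight 1).
def _floyd_warshall_demo():
    inf = float("inf")
    graph = [
        [0.0, inf, inf],
        [inf, 0.0, 2.0],
        [inf, 1.0, 0.0],
    ]
    dist, _ = floyd_warshall(graph, 3)
    return dist  # matches the expected output: 0 INF INF / INF 0 2 / INF 1 0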
| 344 | 0 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # referenced by the arrow-key entries below
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''Get raw characters from stdin (Windows via msvcrt, POSIX via termios/tty).'''
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    '''Get a single keypress, translating escape sequences for arrow keys via KEYMAP.'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
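# Hedged usage sketch (added): a minimal read loop over get_character(),
# stopping on Ctrl-C (KEYMAP["interrupt"]). Left commented out because it
# blocks on stdin.
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break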
| 40 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
| 344 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    '''Iterative Heap's algorithm: generate all permutations of `arr`.'''
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n, arr):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
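# Hedged check (added): each successive tuple differs from the previous by a
# single transposition, so heaps([1, 2, 3]) returns all 3! = 6 permutations:
# (1,2,3), (2,1,3), (3,1,2), (1,3,2), (2,3,1), (3,2,1).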
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
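# Hedged note (added): with _LazyModule bound in sys.modules, the heavy
# torch/TF submodules listed above are imported only on first attribute
# access, so merely importing this package stays cheap.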
| 344 | 0 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    '''Probability of exactly `successes` successes in `trials` Bernoulli(prob) trials.'''
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
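# Hedged check (added): for 2 successes in 4 trials at p = 0.75 the value is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.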
| 42 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
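# Hedged sketch (added): composing this config from two sub-configs via the
# classmethod above; ViT + BERT is an illustrative pairing, not prescribed here.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

def _compose_demo():
    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config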
| 344 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__UpperCamelCase :Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCamelCase :str = value
elif weight_type == "weight_g":
__UpperCamelCase :List[str] = value
elif weight_type == "weight_v":
__UpperCamelCase :str = value
elif weight_type == "bias":
__UpperCamelCase :Union[str, Any] = value
else:
__UpperCamelCase :str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = []
__UpperCamelCase :int = fairseq_model.state_dict()
__UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase :List[Any] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
__UpperCamelCase :List[str] = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__UpperCamelCase :Optional[Any] = True
if "*" in mapped_key:
__UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
__UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__UpperCamelCase :int = '''weight_g'''
elif "weight_v" in name:
__UpperCamelCase :List[Any] = '''weight_v'''
elif "weight" in name:
__UpperCamelCase :Dict = '''weight'''
elif "bias" in name:
__UpperCamelCase :Dict = '''bias'''
else:
__UpperCamelCase :Dict = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1]
__UpperCamelCase :Optional[int] = name.split('''.''' )
__UpperCamelCase :str = int(items[0] )
__UpperCamelCase :List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCamelCase :Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCamelCase :Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCamelCase :int = value
logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCamelCase :Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''Copy/paste/tweak the fairseq Hubert weights into the transformers design.'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
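    # Example invocation (the checkpoint path below is a hypothetical local file,
    # shown only for illustration):
    #   python convert_hubert_checkpoint.py --checkpoint_path ./hubert_base_ls960.pt \
    #       --pytorch_dump_folder_path ./hubert-base-ls960 --not_finetuned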
| 43 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( tmpfs_file ) -> Tuple:
    """simple docstring"""
    output_file = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def UpperCAmelCase ( ) -> List[str]:
    """simple docstring"""
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def UpperCAmelCase ( tmp_path_factory ) -> Union[str, Any]:
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("""https://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def UpperCAmelCase ( tmp_path_factory ) -> int:
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("""ftp://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def UpperCAmelCase ( tmp_path_factory ) -> Optional[int]:
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("""s3://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("""s3://huggingface.co""" )
| 344 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage( image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( image: Image ) -> Dict:
    npimg = np.array(image )
    shape = npimg.shape
    return {"hash": hashimage(image ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf( self ):
pass
@slow
@require_torch
    def test_small_model_pt( self ):
        image_segmenter = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold( self ):
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""" , model=model_id )
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
] , )
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''Feature for translations with a fixed set of languages per example.'''

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation' , init=False , repr=False )
    def __call__( self ):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ):
        from .features import Value
        return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    '''Feature for translations with a variable, possibly one-to-many language set per example.'''

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages' , init=False , repr=False )
    def __post_init__( self ):
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
    def __call__( self ):
        return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'''Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )}).''' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ):
        from .features import Sequence, Value
        return {
            "language": Sequence(Value('''string''' ) ),
            "translation": Sequence(Value('''string''' ) ),
        }
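# Usage sketch (example adapted from the datasets documentation): encoding an example
# flattens one-to-many translations into aligned language/translation lists.
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "le chat", "la chatte")}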
| 45 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig ):
    """Configuration class for DistilBERT models."""

    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig(OnnxConfig ):
    """ONNX export configuration for DistilBERT."""

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
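# Usage sketch: attribute_map routes the generic transformer attribute names to
# DistilBERT's own parameter names (values shown are the defaults above):
#   config = DistilBertConfig(n_layers=4)
#   config.num_hidden_layers  # -> 4, resolved through attribute_map to `n_layers`
#   config.hidden_size        # -> 768, resolved to `dim`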
| 344 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser( subparsers=None ):
    '''Build the `accelerate config` argument parser with its subcommands.'''
    parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    config_parser = config_command_parser(subparsers )
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def main():
    '''Parse CLI arguments and dispatch to the selected config subcommand.'''
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args , """func""" ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
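# Typical CLI usage (assuming the standard accelerate entry points): `accelerate config`
# runs the interactive questionnaire, while `accelerate config default` and
# `accelerate config update` dispatch to the subparsers registered above.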
| 46 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ) -> Optional[Any]:
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> Dict:
    """Move the original backbone keys under the HF conv encoder namespace."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ) -> Optional[int]:
    """Split each fused attention in_proj matrix into separate q/k/v projections."""
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:2_5_6, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:2_5_6]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[2_5_6:5_1_2]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-2_5_6:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-2_5_6:]
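# Note: the 256/512 slice boundaries above assume the Conditional DETR default
# d_model of 256; the fused in_proj tensor stacks the q, k and v weights row-wise.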
def prepare_img() -> Dict:
    """Fetch the standard COCO test image used to sanity-check the conversion."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ) -> Dict:
    """Copy/paste/tweak the original model weights into the transformers design."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = """resnet101"""
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 2_5_0
    else:
        config.num_labels = 9_1
        repo_id = """huggingface/label-files"""
        filename = """coco-detection-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = """coco_panoptic""" if is_panoptic else """coco_detection"""
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    logger.info(F"Converting model {model_name}..." )
    # load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = """conditional_detr.""" + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                val = state_dict.pop(key )
                state_dict["""conditional_detr.model""" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["""conditional_detr.""" + key] = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
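    # Example invocation (the output directory is a hypothetical local path):
    #   python convert_conditional_detr_checkpoint.py --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional-detr-resnet-50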
| 344 | 0 |
'''simple docstring'''
def solution( limit: int = 1_00_00_00 ) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
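    # Worked example: solution(8) == 21, since phi(2..8) = 1, 2, 2, 4, 2, 6, 4.
    # (Project Euler 72: counting reduced proper fractions with denominator <= limit.)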
| 47 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest ):
    """Tests for the UnCLIP scheduler."""

    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ) -> List[Any]:
        config = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> List[Any]:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ) -> Dict:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ) -> str:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ) -> str:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ) -> Dict:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ) -> Optional[int]:
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
    def test_variance_learned_range( self ) -> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""learned_range""" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.799_8052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.001_0011 < 1e-5
    def test_full_loop( self ) -> Any:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
    def test_trained_betas( self ) -> Union[str, Any]:
        pass
    def test_add_noise_device( self ) -> int:
        pass
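# The expected sums/means asserted above (e.g. 252.268_2495) are seeded regression
# values recorded from a reference run of the scheduler, not analytic constants.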
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_luke'] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
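    # With the _LazyModule installed in sys.modules, the torch-backed modeling classes
    # above are only imported on first attribute access, keeping top-level imports cheap.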
| 48 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> Optional[int]:
        size = size if size is not None else {"""height""": 20, """width""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs() -> List[str]:
    """simple docstring"""
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs() -> Optional[int]:
    """simple docstring"""
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image1 = Image.open(ds[0]["""file"""] )
    map1 = Image.open(ds[1]["""file"""] )
    image2 = Image.open(ds[2]["""file"""] )
    map2 = Image.open(ds[3]["""file"""] )
    return [image1, image2], [map1, map2]
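# The ADE20k fixture dataset pairs each image with its annotation file, so these
# helpers return (image, segmentation_map) inputs for the segmentation tests below.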
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""

    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp( self ) -> Dict:
        self.image_processor_tester = BeitImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[int]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )
    def test_batch_feature( self ) -> Union[str, Any]:
pass
    def test_call_pil( self ) -> Dict:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self ) -> List[str]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch( self ) -> str:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_segmentation_maps( self ) -> Optional[int]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
        encoding = image_processing(image_inputs , maps , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
        # Test not batched input (PIL images)
        image , segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
        # Test batched input (PIL images)
        images , segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images , segmentation_maps , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def test_reduce_labels( self ) -> Tuple:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image , map = prepare_semantic_single_inputs()
        encoding = image_processing(image , map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 150 )
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        '''simple docstring'''
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCamelCase__ : Union[str, Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ : str = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = False
UpperCamelCase__ : str = False
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = TFBlenderbotSmallModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE)
@require_tokenizers
@require_tf
class _A ( unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
UpperCamelCase__ : str = '''facebook/blenderbot_small-90M'''
@cached_property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
@cached_property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.tokenizer(self.src_text , return_tensors='''tf''')
__a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__SCREAMING_SNAKE_CASE , )
__a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 49 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase="shi-labs/oneformer_demo" ) -> List[Any]:
with open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) as f:
lowerCamelCase__ : str = json.load(f )
lowerCamelCase__ : Tuple = {}
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : str = []
for key, info in class_info.items():
lowerCamelCase__ : Union[str, Any] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(key ) )
lowerCamelCase__ : Optional[int] = thing_ids
lowerCamelCase__ : Union[str, Any] = class_names
return metadata
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : int=30 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[int]=255 , UpperCAmelCase : Any="shi-labs/oneformer_demo" , UpperCAmelCase : Any="ade20k_panoptic.json" , UpperCAmelCase : List[Any]=10 , ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : Union[str, Any] = min_resolution
lowerCamelCase__ : int = max_resolution
lowerCamelCase__ : Dict = do_resize
lowerCamelCase__ : Optional[int] = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
lowerCamelCase__ : Dict = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : List[str] = image_std
lowerCamelCase__ : Any = class_info_file
lowerCamelCase__ : Any = prepare_metadata(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = num_text
lowerCamelCase__ : List[str] = repo_path
# for the post_process_functions
lowerCamelCase__ : Any = 2
lowerCamelCase__ : str = 10
lowerCamelCase__ : str = 10
lowerCamelCase__ : Any = 3
lowerCamelCase__ : Union[str, Any] = 4
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : str = do_reduce_labels
lowerCamelCase__ : str = ignore_index
def A_ ( self : Union[str, Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A_ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=False ) -> int:
if not batched:
lowerCamelCase__ : List[str] = image_inputs[0]
if isinstance(image , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : Dict = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : Dict = int(self.size['shortest_edge'] * h / w )
lowerCamelCase__ : List[Any] = self.size['shortest_edge']
elif w > h:
lowerCamelCase__ : Optional[Any] = self.size['shortest_edge']
lowerCamelCase__ : str = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase__ : str = self.size['shortest_edge']
lowerCamelCase__ : Union[str, Any] = self.size['shortest_edge']
else:
lowerCamelCase__ : Any = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : Optional[Any] = max(expected_values , key=lambda item : item[0] )[0]
lowerCamelCase__ : str = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def A_ ( self : Tuple ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCAmelCase__ = image_processing_class
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def A_ ( self : str ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A_ ( self : int ) -> Any:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_reduce_labels' ) )
def A_ ( self : str ) -> List[Any]:
pass
def A_ ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processor
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
lowerCamelCase__ : List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
lowerCamelCase__ : List[str] = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Tuple ) -> str:
# Initialize image_processor
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
lowerCamelCase__ : str = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
# Initialize image_processor
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
lowerCamelCase__ : int = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : int , UpperCAmelCase : List[str]=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Union[str, Any]="np" ) -> str:
lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowerCamelCase__ : Dict = self.image_processing_tester.num_labels
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase )
if with_segmentation_maps:
lowerCamelCase__ : Tuple = num_labels
if is_instance_map:
lowerCamelCase__ : Dict = list(range(UpperCAmelCase ) ) * 2
lowerCamelCase__ : Optional[int] = dict(enumerate(UpperCAmelCase ) )
lowerCamelCase__ : int = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowerCamelCase__ : Optional[int] = [Image.fromarray(annotation ) for annotation in annotations]
lowerCamelCase__ : List[str] = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , UpperCAmelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCAmelCase , pad_and_return_pixel_mask=UpperCAmelCase , )
return inputs
def A_ ( self : str ) -> Any:
pass
def A_ ( self : Tuple ) -> List[Any]:
def common(UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Optional[Any]=None ):
lowerCamelCase__ : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCAmelCase , is_instance_map=UpperCAmelCase , segmentation_type=UpperCAmelCase )
lowerCamelCase__ : Tuple = inputs['mask_labels']
lowerCamelCase__ : Union[str, Any] = inputs['class_labels']
lowerCamelCase__ : Optional[Any] = inputs['pixel_values']
lowerCamelCase__ : List[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCAmelCase )
common(is_instance_map=UpperCAmelCase , segmentation_type='pil' )
common(is_instance_map=UpperCAmelCase , segmentation_type='pil' )
def A_ ( self : Optional[int] ) -> Any:
lowerCamelCase__ : Dict = np.zeros((20, 50) )
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : Union[str, Any] = binary_mask_to_rle(UpperCAmelCase )
self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A_ ( self : Union[str, Any] ) -> str:
lowerCamelCase__ : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : Any = image_processor.post_process_semantic_segmentation(UpperCAmelCase )
self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowerCamelCase__ : Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
lowerCamelCase__ : Dict = image_processor.post_process_semantic_segmentation(UpperCAmelCase , target_sizes=UpperCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A_ ( self : List[str] ) -> List[str]:
lowerCamelCase__ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase__ : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : str = image_processor.post_process_instance_segmentation(UpperCAmelCase , threshold=0 )
self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCAmelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A_ ( self : Any ) -> Union[str, Any]:
lowerCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase__ : int = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : Tuple = image_processor.post_process_panoptic_segmentation(UpperCAmelCase , threshold=0 )
self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCAmelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
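# A minimal sketch of the binary_mask_to_rle trick exercised above: flatten the
# mask, pad it with zeros on both ends, and read run boundaries off the diff.
# This is a generic run-length encoding, not necessarily byte-for-byte the
# library's implementation.
import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # indices where the value flips
    runs[1::2] -= runs[::2]  # turn (start, end) pairs into (start, length)
    return list(runs)

mask = np.zeros(10, dtype=np.uint8)
mask[3:7] = 1
print(binary_mask_to_rle_sketch(mask))  # [4, 4]: ones start at pixel 4 (1-based), run length 4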
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
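# A hedged sketch of what the _LazyModule indirection above buys: importing the
# package is cheap, and the heavy submodule import only happens when an attribute
# is first touched. The module path below is illustrative of the structure above.
import transformers.models.yolos as yolos  # fast: nothing heavy is imported yet

config_cls = yolos.YolosConfig  # first attribute access triggers the real import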
| 344 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __snake_case ( a ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(_snake_case):
self.assertDictEqual(r , example_records[i])
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(_snake_case)
UpperCAmelCase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def lowerCamelCase ( self : Optional[Any]): # checks what happens with missing columns
"""simple docstring"""
UpperCAmelCase_ = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def lowerCamelCase ( self : Any): # checks if the type can be inferred from the second record
"""simple docstring"""
UpperCAmelCase_ = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = Dataset.from_list([])
self.assertEqual(len(_snake_case) , 0)
self.assertListEqual(dset.column_names , [])
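# A short usage sketch of the API exercised above, assuming the `datasets`
# library is installed. Dataset.from_list infers the schema from the first
# record, which is exactly the behaviour the missing-column test relies on.
from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset = Dataset.from_list(records)
print(dset.column_names)  # ['col_1', 'col_2']
print(dset[0])            # {'col_1': 3, 'col_2': 'a'}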
| 51 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(name ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(items_copy ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
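# A de-obfuscated, self-contained sketch of the greedy selection above. All
# names (Thing, greedy) are illustrative, since the identifiers in this file
# are machine-renamed. The greedy rule sorts items by the chosen key (here,
# value density) and takes each item that still fits in the budget.
class Thing:
    def __init__(self, name: str, value: float, weight: float) -> None:
        self.name, self.value, self.weight = name, value, weight

def greedy(items: list, max_cost: float, key_func) -> tuple:
    items_copy = sorted(items, key=key_func, reverse=True)
    result, total_value, total_cost = [], 0.0, 0.0
    for item in items_copy:
        if total_cost + item.weight <= max_cost:
            result.append(item)
            total_cost += item.weight
            total_value += item.value
    return result, total_value

menu = [Thing("Burger", 80, 40), Thing("Pizza", 100, 60), Thing("Coke", 10, 10)]
chosen, value = greedy(menu, max_cost=60, key_func=lambda t: t.value / t.weight)
print([t.name for t in chosen], value)  # ['Burger', 'Coke'] 90.0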
| 344 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCamelCase : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 52 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( a_ , a_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
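# A worked example for the resonance formula above, f = 1 / (2*pi*sqrt(L*C)).
# The component values are illustrative: a 10 mH inductor with a 5 uF capacitor.
from math import pi, sqrt

inductance = 10e-3   # henries
capacitance = 5e-6   # farads
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
print(round(frequency, 1))  # ~711.8 Hz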
| 344 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : List[str] ={'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a__ : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(config_common_kwargs )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(wrong_values ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 344 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = int(np.ceil((x_end - xa) / step_size ) )
__SCREAMING_SNAKE_CASE = np.zeros((n + 1,) )
__SCREAMING_SNAKE_CASE = ya
__SCREAMING_SNAKE_CASE = xa
for k in range(n ):
__SCREAMING_SNAKE_CASE = y[k] + step_size * ode_func(x , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
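# A quick check of the explicit Euler loop above on dy/dx = y with y(0) = 1,
# whose exact solution is e**x. The function name here is illustrative, since
# the one above is machine-renamed; this standalone version mirrors its logic.
import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ~2.7048, versus exp(1) ~ 2.7183; the gap shrinks with step_size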
| 54 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
A_ : str = jax.tree_util.tree_map(
lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
A_ : Dict = list(missing_keys )
if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
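# A small sketch of the layout conversion at the heart of the loader above:
# Flax stores conv kernels as (H, W, in_channels, out_channels), while PyTorch
# Conv2d expects (out_channels, in_channels, H, W); transposing with
# (3, 2, 0, 1) maps one onto the other. The shapes below are illustrative.
import numpy as np
import torch

flax_kernel = np.random.randn(3, 3, 16, 32).astype(np.float32)  # H, W, Cin, Cout
pt_weight = torch.from_numpy(np.transpose(flax_kernel, (3, 2, 0, 1)))
print(pt_weight.shape)  # torch.Size([32, 16, 3, 3])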
| 344 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCamelCase_ = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
lowerCamelCase_ = InstructBlipProcessor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).tokenizer
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).qformer_tokenizer
def snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
lowerCamelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="np" )
lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=UpperCamelCase )
lowerCamelCase_ = tokenizer(UpperCamelCase , return_token_type_ids=UpperCamelCase )
lowerCamelCase_ = qformer_tokenizer(UpperCamelCase , return_token_type_ids=UpperCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCamelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 55 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(tuple_object , dict_object ):
if isinstance(tuple_object , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object , dict_object ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(model_name )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 344 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class a ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` below resolves to the functional transform imported above, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
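# Minimal usage sketch (hedged): runs the processor end to end with its defaults.
# The class name `a` is kept from the snippet above; the printed shape assumes
# the default shortest-edge-224 resize followed by the 256x256 center crop.
def _usage_example():
    from PIL import Image

    processor = a()
    image = Image.new("RGB", (640, 480))
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 256, 256)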
| 56 |
'''simple docstring'''
def solution( n = 1_0_0 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    print(f'{solution() = }')
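# Worked check (illustrative): for n = 10 the sum of the first ten squares is
# 385 and the square of their sum is 3025, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640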
| 344 | 0 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
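# Example invocation (hypothetical script name and output path; the txt2img
# checkpoint defaults to kakaobrain/karlo-v1-alpha, so only --dump_path is required):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation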
| 57 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
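# Typical downstream use of this package (sketch; assumes an already-loaded
# PreTrainedModel — the FeaturesManager call below is the public API):
#
#   from transformers.onnx import FeaturesManager
#   model_kind, onnx_config_cls = FeaturesManager.check_supported_model_or_raise(model, feature="default")
#   onnx_config = onnx_config_cls(model.config)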
| 344 | 0 |
'''simple docstring'''
import random
def partition( a , left_index , right_index ) ->Optional[int]:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random( a , left , right ) ->str:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point


def main() ->Tuple:
    user_input = input("""Enter numbers separated by a comma:\n""" ).strip()
    unsorted = [int(item ) for item in user_input.split(""",""" )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )


if __name__ == "__main__":
    main()
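# Illustrative, non-interactive check of the in-place sort (the right bound is
# exclusive, matching the range() used in partition above):
def _demo():
    nums = [5, 2, 9, 1, 7]
    quick_sort_random(nums, 0, len(nums))
    assert nums == [1, 2, 5, 7, 9]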
| 58 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein ) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein


def make_atom14_masks_np( batch ) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n : torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t : np.array(t ) , make_atom14_masks(batch ) )
    return out
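# Shape sketch (hedged; assumes the surrounding residue_constants module is
# importable). For a chain of `num_res` residues the function adds these entries:
def _shape_check(num_res=8):
    protein = {"aatype": torch.zeros(num_res, dtype=torch.long)}
    protein = make_atom14_masks(protein)
    assert protein["residx_atom14_to_atom37"].shape == (num_res, 14)
    assert protein["residx_atom37_to_atom14"].shape == (num_res, 37)
    assert protein["atom14_atom_exists"].shape == (num_res, 14)
    assert protein["atom37_atom_exists"].shape == (num_res, 37)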
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
__lowerCamelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mt5"""] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
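# Downstream usage resolves through the lazy module above (sketch; this is the
# public transformers API, shown with a known checkpoint):
#
#   from transformers import MT5ForConditionalGeneration
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")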
| 59 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> int:
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> int:
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Union[str, Any]:
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> List[str]:
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds( self ) -> Optional[int]:
pass
    def test_model_common_attributes( self ) -> Union[str, Any]:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Optional[Any]:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ) -> Optional[Any]:
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ) -> int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"Something is going wrong in the regression problem: intercepted {w.message}" )
                    loss.backward()
@slow
    def test_model_from_pretrained( self ) -> Tuple:
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Tuple:
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ) -> Optional[Any]:
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ) -> Tuple:
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ) -> Tuple:
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 344 | 0 |
"""simple docstring"""
def _snake_case ( n : int = 1000 ):
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
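# Worked check: for n = 5 the terms are 2*3*1 = 6, 2*4*1 = 8 and 2*5*2 = 20,
# so _snake_case(5) == 34.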
| 60 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , do_center_crop = True , image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073] , image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711] , do_pad = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ) -> Union[str, Any]:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"""shortest_edge""": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self ) -> Any:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values( self , image_inputs , batched=False ) -> Optional[int]:
        if not batched:
            size = self.size["""shortest_edge"""]
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self ) -> Dict:
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Optional[Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
    def test_call_pil( self ) -> List[str]:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ) -> Union[str, Any]:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ) -> Tuple:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 344 | 0 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=64 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : str = num_choices
UpperCAmelCase_ : List[str] = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = MobileBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
UpperCAmelCase_ : Dict = model(lowercase_ , token_type_ids=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = MobileBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = MobileBertForNextSentencePrediction(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = MobileBertForPreTraining(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , next_sentence_label=lowercase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = MobileBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[str] = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : List[str] = MobileBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Tuple = MobileBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.num_choices
UpperCAmelCase_ : Dict = MobileBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : str = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_ )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests (unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_7_3_6_5_2_6E0_7, 8.2_6_9_1_6_5_6E0_4, 1.6_5_2_1_8_3_8E0_5],
                    [-5.7_5_4_1_7_0_4E-0_1, 3.9_0_5_6_0_2_2E0_0, 4.4_0_1_1_5_0_7E0_0],
                    [2.6_0_4_7_3_5_9E0_0, 1.5_6_7_7_6_5_2E0_0, -1.7_3_2_4_1_8_8E-0_1],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 61 |
'''simple docstring'''
def _print_dist( dist , v ) -> Optional[int]:
    """simple docstring"""
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("""inf""" ):
                print(int(dist[i][j] ) , end="""\t""" )
            else:
                print("""INF""" , end="""\t""" )
        print()


def floyd_warshall( graph , v ) -> Tuple:
    """simple docstring"""
    dist = [[float("""inf""" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("""inf""" )
                    and dist[k][j] != float("""inf""" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v


if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
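# Programmatic equivalent of the commented example above (no interactive input):
def _example():
    INF = float("inf")
    graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
    dist, _ = floyd_warshall(graph, 3)  # prints the matrix shown above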
| 344 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
class BartOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
__UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
# Generate decoder inputs
__UpperCamelCase =seq_length if not self.use_past else 1
__UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
__UpperCamelCase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase =dict(**A_ , **A_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase , __UpperCamelCase =common_inputs['input_ids'].shape
__UpperCamelCase =common_inputs['decoder_input_ids'].shape[1]
__UpperCamelCase , __UpperCamelCase =self.num_attention_heads
__UpperCamelCase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase =decoder_seq_length + 3
__UpperCamelCase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase =torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(A_ , A_ )] , dim=1 )
__UpperCamelCase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase , __UpperCamelCase =self.num_layers
__UpperCamelCase =min(A_ , A_ )
__UpperCamelCase =max(A_ , A_ ) - min_num_layers
__UpperCamelCase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(A_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
) )
# TODO: test this.
__UpperCamelCase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(A_ , A_ ):
common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
return common_inputs
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
__UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase , __UpperCamelCase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__UpperCamelCase =seqlen + 2
__UpperCamelCase , __UpperCamelCase =self.num_layers
__UpperCamelCase , __UpperCamelCase =self.num_attention_heads
__UpperCamelCase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase =common_inputs['attention_mask'].dtype
__UpperCamelCase =torch.cat(
[common_inputs['attention_mask'], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
__UpperCamelCase =[
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
]
return common_inputs
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__UpperCamelCase =compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase =tokenizer.num_special_tokens_to_add(A_ )
__UpperCamelCase =compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase =[' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__UpperCamelCase =dict(tokenizer(A_ , return_tensors=A_ ) )
return common_inputs
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
elif self.task == "causal-lm":
__UpperCamelCase =self._generate_dummy_inputs_for_causal_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
else:
__UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
return common_inputs
def _a ( self , A_ , A_ , A_ , A_ ) -> str:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase =super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
else:
__UpperCamelCase =super(A_ , self )._flatten_past_key_values_(
A_ , A_ , A_ , A_ )
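# A minimal, hypothetical sketch of the dummy past_key_values construction used in the
# config above, assuming PyTorch is available: each decoder layer gets one (key, value)
# pair of zero tensors shaped (batch, num_heads, past_length, hidden_size // num_heads).
# The function and argument names are illustrative, not library API.
import torch

def make_dummy_past_key_values(batch, num_heads, past_length, hidden_size, num_layers):
    shape = (batch, num_heads, past_length, hidden_size // num_heads)
    # one zero-initialized (key, value) tuple per layer
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]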
| 62 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
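# Hypothetical usage sketch for the metric above, assuming it is registered with the
# `datasets` library under the name "squad"; the example values mirror the docstring.
import datasets

squad_metric = datasets.load_metric("squad")
predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
print(squad_metric.compute(predictions=predictions, references=references))
# {'exact_match': 100.0, 'f1': 100.0}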
| 344 | 0 |
'''simple docstring'''
def _lowerCamelCase ( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return _lowerCamelCase(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
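# Illustrative calls for the two-ended recursive search above: it scans inward from
# both ends at once, returning a matching index or -1 (values made up).
#   _lowerCamelCase([1, 3, 5, 7, 9], 7)  -> 3
#   _lowerCamelCase([1, 3, 5, 7, 9], 4)  -> -1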
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
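# A minimal sketch of the lazy-import pattern used above, with made-up submodule names:
# the module object is swapped for a _LazyModule proxy, so heavy submodules are only
# imported when one of their attributes is first accessed. The import location of
# _LazyModule is assumed here.
import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_example": ["ExampleConfig"]}  # hypothetical submodule
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)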
| 344 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Optional[Any]=False ):
"""simple docstring"""
_snake_case : Any = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
_snake_case : List[str] = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
_snake_case : int = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_snake_case : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
_snake_case : Dict = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(snake_case__ )-1}" )
if "norm" in key:
_snake_case : Union[str, Any] = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_snake_case : str = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
_snake_case : int = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(snake_case__ )-1}" )
if "layer_norm1" in key:
_snake_case : List[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
_snake_case : int = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
_snake_case : str = key[key.find("""block""" ) + len("""block""" )]
_snake_case : str = key.replace(F"block{idx}" , F"block.{int(snake_case__ )-1}" )
if "attn.q" in key:
_snake_case : Any = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
_snake_case : int = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
_snake_case : int = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
_snake_case : Optional[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
_snake_case : Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
_snake_case : Optional[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
_snake_case : Union[str, Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
_snake_case : List[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_snake_case : int = key[key.find("""linear_c""" ) + len("""linear_c""" )]
_snake_case : Optional[Any] = key.replace(F"linear_c{idx}" , F"linear_c.{int(snake_case__ )-1}" )
if key.startswith("""head""" ):
_snake_case : Optional[int] = key.replace("""head""" , """classifier""" )
_snake_case : Optional[Any] = value
return new_state_dict
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_snake_case : Tuple = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
_snake_case : Tuple = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
_snake_case : List[Any] = kv_weight[
: config.hidden_sizes[i], :
]
_snake_case : List[str] = kv_bias[: config.hidden_sizes[i]]
_snake_case : Any = kv_weight[
config.hidden_sizes[i] :, :
]
_snake_case : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
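# Toy illustration of the split performed above: the original checkpoint stores keys
# and values as a single fused (2 * hidden, hidden) matrix, which is cut in half
# row-wise. Sizes here are made up.
import torch

hidden = 4
kv_weight = torch.randn(2 * hidden, hidden)
key_weight, value_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
assert key_weight.shape == value_weight.shape == (hidden, hidden)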
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : str = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Tuple = SegformerConfig()
_snake_case : Optional[Any] = False
# set attributes based on model_name
_snake_case : Optional[Any] = """huggingface/label-files"""
if "segformer" in model_name:
_snake_case : Tuple = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
_snake_case : Optional[Any] = 1_50
_snake_case : Tuple = """ade20k-id2label.json"""
_snake_case : List[Any] = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
_snake_case : Optional[int] = 19
_snake_case : Dict = """cityscapes-id2label.json"""
_snake_case : str = (1, 19, 1_28, 1_28)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
_snake_case : Union[str, Any] = True
_snake_case : List[str] = model_name[4:6]
_snake_case : Dict = 10_00
_snake_case : Optional[int] = """imagenet-1k-id2label.json"""
_snake_case : Tuple = (1, 10_00)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
_snake_case : Optional[int] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : int = idalabel
_snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_snake_case : Optional[int] = [64, 1_28, 3_20, 5_12]
_snake_case : Optional[int] = 2_56
elif size == "b2":
_snake_case : Any = [64, 1_28, 3_20, 5_12]
_snake_case : List[str] = 7_68
_snake_case : List[str] = [3, 4, 6, 3]
elif size == "b3":
_snake_case : List[Any] = [64, 1_28, 3_20, 5_12]
_snake_case : str = 7_68
_snake_case : List[Any] = [3, 4, 18, 3]
elif size == "b4":
_snake_case : Any = [64, 1_28, 3_20, 5_12]
_snake_case : Any = 7_68
_snake_case : Dict = [3, 8, 27, 3]
elif size == "b5":
_snake_case : int = [64, 1_28, 3_20, 5_12]
_snake_case : str = 7_68
_snake_case : List[str] = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
_snake_case : Optional[int] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
# prepare image
_snake_case : Dict = prepare_img()
_snake_case : List[Any] = image_processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
_snake_case : Dict = torch.load(snake_case__ , map_location=torch.device("""cpu""" ) )
else:
_snake_case : str = torch.load(snake_case__ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
_snake_case : int = rename_keys(snake_case__ , encoder_only=snake_case__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(snake_case__ , snake_case__ )
# create HuggingFace model and load state dict
if encoder_only:
_snake_case : List[Any] = False
_snake_case : Optional[Any] = SegformerForImageClassification(snake_case__ )
else:
_snake_case : List[str] = SegformerForSemanticSegmentation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# forward pass
_snake_case : List[Any] = model(snake_case__ )
_snake_case : Any = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_snake_case : Tuple = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_snake_case : Union[str, Any] = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_snake_case : List[Any] = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_snake_case : Union[str, Any] = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_snake_case : Union[str, Any] = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_snake_case : List[Any] = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_snake_case : Optional[Any] = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_snake_case : Optional[Any] = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_snake_case : str = torch.tensor(
[
[
[-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1],
[-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1],
[-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1],
],
[
[-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1],
[-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1],
[-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1],
],
[
[7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2],
[4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1],
[3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_snake_case : Any = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_snake_case : Tuple = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_snake_case : List[Any] = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_snake_case : int = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_snake_case : Tuple = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_snake_case : List[Any] = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
_snake_case : Optional[int] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
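# Hypothetical invocation of the conversion script above (file name and paths are
# illustrative; the flags match the argparse definitions):
#   python convert_segformer_checkpoint.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path /path/to/original_checkpoint.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade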
| 64 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
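# Hypothetical composition sketch for the config class above. It assumes the public
# class is VisionEncoderDecoderConfig (per the model_type string) and that the
# classmethod shown corresponds to from_encoder_decoder_configs; the sub-models are
# illustrative.
from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_config = AutoConfig.for_model("vit")
decoder_config = AutoConfig.for_model("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)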
| 344 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
def run_func(__A ):
@wraps(__A )
def run_in_eager_mode(*__A, **__A ):
return func(*__A, **__A )
@wraps(__A )
@tf.function(experimental_compile=__A )
def run_in_graph_mode(*__A, **__A ):
return func(*__A, **__A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowerCAmelCase_ ( __A, __A, __A ) -> ["tf.Tensor"]:
'''simple docstring'''
UpperCAmelCase__ = random.Random()
UpperCAmelCase__ = [rng.randint(0, vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__A, shape=(batch_size, sequence_length), dtype=tf.intaa )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : TensorFlowBenchmarkArguments
__UpperCAmelCase : PretrainedConfig
__UpperCAmelCase : str = "TensorFlow"
@property
def lowercase_ (self : Any ) -> str:
"""simple docstring"""
return tf.__version__
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> float:
"""simple docstring"""
UpperCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_speed(_inference )
def lowercase_ (self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> float:
"""simple docstring"""
UpperCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_speed(_train )
def lowercase_ (self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase )
UpperCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_memory(_inference )
def lowercase_ (self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase )
UpperCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_memory(_train )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Callable[[], None]:
"""simple docstring"""
UpperCAmelCase__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ = (
hasattr(__UpperCAmelCase , "architectures" )
and isinstance(config.architectures , __UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ = getattr(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = model_cls(__UpperCAmelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ = TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCAmelCase , training=__UpperCAmelCase )
UpperCAmelCase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowercase_ (self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Callable[[], None]:
"""simple docstring"""
UpperCAmelCase__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ = (
hasattr(__UpperCAmelCase , "architectures" )
and isinstance(config.architectures , __UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ = getattr(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = model_cls(__UpperCAmelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0]
UpperCAmelCase__ = tf.gradients(__UpperCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0]
UpperCAmelCase__ = tf.gradients(__UpperCAmelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowercase_ (self : Tuple , __UpperCAmelCase : Dict ) -> float:
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ = timeit.repeat(
__UpperCAmelCase , repeat=self.args.repeat , number=1_0 , )
return min(__UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Callable[[], None] ) -> [Memory, MemorySummary]:
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ = nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase )
UpperCAmelCase__ = meminfo.used
UpperCAmelCase__ = Memory(__UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ = None
else:
UpperCAmelCase__ = measure_peak_memory_cpu(__UpperCAmelCase )
UpperCAmelCase__ = Memory(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ = stop_memory_tracing(__UpperCAmelCase )
if memory is None:
UpperCAmelCase__ = summary.total
else:
UpperCAmelCase__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 65 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
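# A minimal sketch of the offline-mode test pattern exercised above, using the same
# patch target and exception; the URL is illustrative.
from unittest.mock import patch
import pytest
from datasets.utils.file_utils import OfflineModeIsEnabled, cached_path

@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_offline_mode_blocks_remote_paths():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://example.com/data.txt")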
| 344 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
snake_case_ :Optional[int] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
snake_case_ :List[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
snake_case_ :Dict = """The dog is cute and lives in the garden house"""
snake_case_ :Dict = jnp.array([tokenizer.encode(snake_case )] )
snake_case_ :Optional[Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
snake_case_ :Optional[Any] = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
snake_case_ :List[Any] = model(snake_case )["""last_hidden_state"""]
self.assertEqual(output.shape , snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case , atol=1E-3 ) )
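# Shape note for the assertions above: last_hidden_state is
# (batch_size, sequence_length, hidden_size) = (1, 12, 768) for this 12-token input,
# and the value check compares only the slice output[:, :, -1] against the reference.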
| 66 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Any ="llama"
lowerCamelCase : Optional[int] =["past_key_values"]
def __init__( self : Union[str, Any] , a : List[Any]=3_20_00 , a : Any=40_96 , a : Optional[Any]=1_10_08 , a : List[Any]=32 , a : Union[str, Any]=32 , a : Tuple=None , a : Any="silu" , a : Optional[Any]=20_48 , a : str=0.02 , a : Any=1e-6 , a : List[str]=True , a : int=0 , a : Union[str, Any]=1 , a : List[str]=2 , a : Tuple=1 , a : Union[str, Any]=False , a : Union[str, Any]=None , **a : str , ):
"""simple docstring"""
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = intermediate_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__lowerCamelCase = num_attention_heads
__lowerCamelCase = num_key_value_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = initializer_range
__lowerCamelCase = rms_norm_eps
__lowerCamelCase = pretraining_tp
__lowerCamelCase = use_cache
__lowerCamelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , tie_word_embeddings=a , **a , )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , a ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__lowerCamelCase = self.rope_scaling.get('''type''' , a )
__lowerCamelCase = self.rope_scaling.get('''factor''' , a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(a , a ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 67 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
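# Illustrative mapping produced by the ONNX config above for the default task:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# For "multiple-choice" the dynamic axes become {0: "batch", 1: "choice", 2: "sequence"}.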
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 68 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ) -> Optional[Any]:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> Dict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
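# rename_backbone_keys maps e.g. "backbone.0.body.conv1.weight"
# -> "backbone.conv_encoder.model.conv1.weight", leaving all other keys untouched.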
def read_in_q_k_v( state_dict , is_panoptic=False ) -> Optional[int]:
    """simple docstring"""
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:2_5_6, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:2_5_6]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[2_5_6:5_1_2]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-2_5_6:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-2_5_6:]
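# The slicing in read_in_q_k_v assumes conditional DETR's hidden size of 256: each
# in_proj_weight has shape (3 * 256, 256) and is split into query ([:256]), key
# ([256:512]) and value ([-256:]) weights. A minimal sketch of the same split on a
# dummy tensor (illustrative only):
#
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]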
def prepare_img( ) -> Dict:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
    A_ : str = {int(k ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
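# Example invocation (script and folder names are hypothetical):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50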
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__, param_name='crop_size')
snake_case_ = do_resize
snake_case_ = do_rescale
snake_case_ = do_normalize
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = size
snake_case_ = resample
snake_case_ = rescale_factor
snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "shortest_edge" in size:
snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=size['shortest_edge'], default_to_square=lowerCAmelCase__)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
snake_case_ = (size['height'], size['width'])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}')
return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> BatchFeature:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size', default_to_square=lowerCAmelCase__)
snake_case_ = resample if resample is not None else self.resample
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__)
if not is_batched(lowerCAmelCase__):
snake_case_ = [images]
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
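# A minimal usage sketch (illustrative only; "example.jpg" is a hypothetical path, and the
# obfuscated methods above are assumed to correspond to BaseImageProcessor's
# resize/center_crop/rescale/normalize/preprocess):
#
#   from PIL import Image
#   processor = UpperCamelCase()
#   batch = processor.preprocess(Image.open("example.jpg"), return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + 224x224 center crop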
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **_lowerCamelCase ) -> List[Any]:
        config = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**_lowerCamelCase )
        return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
"""simple docstring"""
with open(lowerCAmelCase ) as metadata_file:
_lowerCAmelCase = json.load(lowerCAmelCase )
_lowerCAmelCase = LukeConfig(use_entity_aware_attention=lowerCAmelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
_lowerCAmelCase = load_original_entity_vocab(lowerCAmelCase )
# add an entry for [MASK2]
_lowerCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowerCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase = AddedToken("""<ent>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
_lowerCAmelCase = AddedToken("""<ent2>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """r""" ) as f:
_lowerCAmelCase = json.load(lowerCAmelCase )
_lowerCAmelCase = """MLukeTokenizer"""
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase )
# Initialize the embeddings of the special tokens
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
_lowerCAmelCase = state_dict["""embeddings.word_embeddings.weight"""]
_lowerCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_lowerCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_lowerCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowerCAmelCase = state_dict[bias_name]
_lowerCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_lowerCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_lowerCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase = f"encoder.layer.{layer_index}.attention.self."
_lowerCAmelCase = state_dict[prefix + matrix_name]
_lowerCAmelCase = state_dict[prefix + matrix_name]
_lowerCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowerCAmelCase = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowerCAmelCase = state_dict["""entity_predictions.bias"""]
_lowerCAmelCase = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowerCAmelCase = LukeForMaskedLM(config=lowerCAmelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
_lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
_lowerCAmelCase = state_dict[key]
else:
_lowerCAmelCase = state_dict[key]
_lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if set(lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase , task="""entity_classification""" )
_lowerCAmelCase = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
_lowerCAmelCase = (0, 9)
_lowerCAmelCase = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowerCAmelCase = model(**lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase = torch.Size((1, 33, 7_68) )
_lowerCAmelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase = torch.Size((1, 1, 7_68) )
_lowerCAmelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowerCAmelCase = MLukeTokenizer.from_pretrained(lowerCAmelCase )
_lowerCAmelCase = """Tokyo is the capital of <mask>."""
_lowerCAmelCase = (24, 30)
_lowerCAmelCase = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowerCAmelCase = model(**lowerCAmelCase )
_lowerCAmelCase = encoding["""input_ids"""][0].tolist()
_lowerCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
_lowerCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase )
_lowerCAmelCase = outputs.entity_logits[0][0].argmax().item()
_lowerCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCAmelCase ) )
model.save_pretrained(lowerCAmelCase )
def load_original_entity_vocab( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    _lowerCAmelCase = [json.loads(line ) for line in open(lowerCAmelCase )]
_lowerCAmelCase = {}
for entry in data:
_lowerCAmelCase = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowerCAmelCase = entity_id
break
_lowerCAmelCase = f"{language}:{entity_name}"
_lowerCAmelCase = entity_id
return new_mapping
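# load_original_entity_vocab expects one JSON object per line, e.g. (illustrative, not a real entry):
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# Non-special entities are keyed as "language:name" (here "en:Japan"), while special tokens
# such as "[MASK]", "[PAD]" and "[UNK]" keep their bare name.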
if __name__ == "__main__":
A__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
A__ : Union[str, Any] =parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> Optional[int]:
        size = size if size is not None else {"""height""": 20, """width""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs() -> List[str]:
    """simple docstring"""
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs() -> Optional[int]:
    """simple docstring"""
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image1 = Image.open(ds[0]["""file"""] )
    map1 = Image.open(ds[1]["""file"""] )
    image2 = Image.open(ds[2]["""file"""] )
    map2 = Image.open(ds[3]["""file"""] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class _lowerCAmelCase ( ImageProcessingSavingTestMixin, unittest.TestCase ):
"""simple docstring"""
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp( self ) -> Dict:
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
    def test_call_pil( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_segmentation_maps( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def test_reduce_labels( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
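        # Note: with do_reduce_labels=True, BeitImageProcessor maps the background class 0 to the
        # ignore index 255 and shifts all other labels down by one, which is why the maximum
        # label can legitimately reach 255 in the second encoding above.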
def multiplicative_persistence( num ) -> int:
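    """
    Return the multiplicative persistence of ``num``: how many times the digits of
    ``num`` must be multiplied together before a single digit remains.

    >>> multiplicative_persistence(86)
    3
    """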
    if not isinstance(num , int ):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(digit ) for digit in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num ) -> int:
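    """
    Return the additive persistence of ``num``: how many times the digits of ``num``
    must be summed before a single digit remains.

    >>> additive_persistence(86)
    2
    """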
    if not isinstance(num , int ):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(digit ) for digit in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer( tf.keras.layers.Layer ):
"""simple docstring"""
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def from_config( cls , config ):
        return cls(**config )
    def get_config( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call( self , x , max_length = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
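# A minimal usage sketch (illustrative; "gpt2" is the standard checkpoint name and 50256 its
# end-of-text token id, used here as the padding value):
#
#   tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2", max_length=16, pad_token_id=50256)
#   outputs = tf_tokenizer(tf.constant(["hello world"]))
#   outputs["input_ids"], outputs["attention_mask"]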
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester( ConfigTester):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) )
class MobileViTVaModelTester :
def __init__( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : List[Any]=6_4 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : int="swish" , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : int=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=1_0 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=0.25 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : List[str]=0.0 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Union[str, Any] = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Dict = conv_kernel_size
_lowerCamelCase : List[Any] = output_stride
_lowerCamelCase : Any = classifier_dropout_prob
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = scope
_lowerCamelCase : Any = width_multiplier
_lowerCamelCase : int = ffn_dropout
_lowerCamelCase : Union[str, Any] = attn_dropout
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCamelCase : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = MobileViTVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : Optional[Any] = MobileViTVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : List[str] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = MobileViTVaModelTester(self )
_lowerCamelCase : Union[str, Any] = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : List[Any] = outputs.hidden_states
_lowerCamelCase : List[str] = 5
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCamelCase : Union[str, Any] = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = MobileViTVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Any = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_lowerCamelCase : int = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_lowerCamelCase : Any = model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
_lowerCamelCase : Dict = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_lowerCamelCase : Tuple = model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
_lowerCamelCase : List[str] = outputs.logits.detach().cpu()
_lowerCamelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(5_0, 6_0)] )
_lowerCamelCase : Dict = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
_lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
_lowerCamelCase : str = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
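# With the _LazyModule registration above, submodules such as modeling_yolos are imported only
# on first attribute access, so importing YolosConfig does not pull in torch-dependent code.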
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
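# Shape sketch (illustrative numbers, not from the original file): with batch_size=2 and
# 4 choices per example, the collator pads 2 * 4 = 8 flattened sequences together, then
# views each tensor back as (2, 4, seq_len) so the model can score all choices jointly.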
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCamelCase : Optional[Any] = [F"ending{i}" for i in range(4 )]
__lowerCamelCase : int = 'sent1'
__lowerCamelCase : Optional[Any] = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                F" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding='max_length' if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
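    # Grouping sketch: the tokenizer above returns flat lists of batch_size * 4 sequences;
    # the dict comprehension regroups them into chunks of 4 so every example keeps its
    # four candidate endings together.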
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
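# Typical invocation (paths and model name are illustrative, not from this file):
#   python run_swag.py --model_name_or_path bert-base-uncased --do_train --do_eval \
#       --output_dir /tmp/swag_out --pad_to_max_length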
| 73 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
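    # Hedged usage sketch — the menu below is illustrative, not from the original file:
    # pick items greedily by value until the cost budget is exhausted.
    names = ["burger", "pizza", "cola"]
    values = [80.0, 100.0, 60.0]
    weights = [40.0, 60.0, 40.0]
    chosen, total = greedy(build_menu(names, values, weights), 100.0, Things.get_value)
    print(chosen, total)  # picks pizza then burger; total value 180.0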
| 344 | 0 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
) | 74 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """simple docstring"""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
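    # Illustrative check (values are assumptions, not from the original file):
    # f = 1 / (2 * pi * sqrt(L * C)); for L = 10 H and C = 5 F this is ~0.0225 Hz.
    print(resonant_frequency(inductance=10, capacitance=5))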
| 344 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')

    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ), )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'''--config_file={args.config_file} {script_name}'''

    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
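# Typical CLI invocation (the config path is illustrative):
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml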
| 75 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=F"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """config.json""")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
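# Minimal usage sketch inside a model's test case (BertConfig is illustrative):
#   self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   self.config_tester.run_common_tests()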
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self, vocab_size=1_0000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                F"`config.num_conv_layers = {self.num_conv_layers}`." )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) | 76 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """simple docstring"""
    try:
        with open(flax_checkpoint_path, """rb""") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(flax_checkpoint_path) as f:
                if f.read().startswith("""version"""):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state )

    pt_model.base_model_prefix = """"""
    flax_state_dict = flatten_dict(flax_state, sep=""".""")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""", """.0""")
                    .replace("""_1""", """.1""")
                    .replace("""_2""", """.2""")
                    .replace("""_3""", """.3""")
                    .replace("""_4""", """.4""")
                    .replace("""_5""", """.5""")
                    .replace("""_6""", """.6""")
                    .replace("""_7""", """.7""")
                    .replace("""_8""", """.8""")
                    .replace("""_9""", """.9""")
                )
        flax_key = """.""".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys) > 0:
        logger.warning(
            F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""" )
    return pt_model
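# Minimal usage sketch (the model class and checkpoint path are illustrative):
#   pt_model = SomePyTorchModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")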
| 344 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        '''simple docstring'''
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
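    # Sanity check (assumption: a straight segment): y = x from 0 to 1 has length sqrt(2) ≈ 1.41421.
    print(f'''Straight-line check: {line_length(lambda x: x, 0, 1, 100)}''')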
| 77 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="""RegNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="""RegNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 344 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {}
class LlamaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """llama"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__( self, vocab_size=3_20_00, hidden_size=40_96, intermediate_size=1_10_08, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=20_48, initializer_range=0.02, rms_norm_eps=1E-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
| 78 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'{solution() = }')
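    # Known value for n = 10: square of sum (3025) minus sum of squares (385) is 2640.
    assert solution(10) == 2640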
| 344 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["""aatype"""].device, )
    protein_aatype = protein["""aatype"""].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["""aatype"""].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["""aatype"""].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
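# Shape sketch (illustrative, assuming standard openfold-style batches): for a protein
# with N residues, "aatype" has shape (..., N); "residx_atom14_to_atom37" comes out as
# (..., N, 14) and "atom37_atom_exists" as (..., N, 37).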
| 344 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job['''started_at''']
    end = job['''completed_at''']

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}

    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'''&page={i + 2}''', headers=headers).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})

        return job_time
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}') | 81 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : List[Any] = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
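# A short sketch of plain fp32 inference with the same checkpoint outside the
# test harness (assumes network access to the Hub; the image path is the test
# fixture used above):
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher

processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])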
| 344 | 0 |
import math
def is_prime(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """simple docstring"""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
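# Quick usage of the two helpers above:
assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(100)
print(next_prime(14))              # 17, the first prime at or above 14
print(next_prime(14, desc=True))   # 13, searching downwards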
| 82 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]:
A_ : Optional[int] = parent
A_ : Union[str, Any] = do_resize
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
A_ : Tuple = size_divisor
A_ : List[Any] = do_rescale
A_ : Dict = rescale_factor
A_ : List[Any] = do_normalize
A_ : Dict = do_center_crop
A_ : Optional[Any] = image_mean
A_ : List[str] = image_std
A_ : str = do_pad
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = min_resolution
A_ : Union[str, Any] = max_resolution
def UpperCAmelCase_ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
if not batched:
A_ : Union[str, Any] = self.size["""shortest_edge"""]
A_ : Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
A_ , A_ : Optional[Any] = size, scale * w
else:
A_ , A_ : Dict = scale * h, size
A_ : Union[str, Any] = int((1333 / 800) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = newh * scale
A_ : Dict = neww * scale
A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 )
A_ , A_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
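# The expected-value helper above implements shortest-edge resizing with a
# long-side cap and size-divisor rounding; the same math as a standalone
# function (a sketch mirroring the test logic, not the library API):
def shortest_edge_resize(height, width, size=288, size_divisor=32):
    scale = size / min(height, width)
    if height < width:
        newh, neww = size, scale * width
    else:
        newh, neww = scale * height, size
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        scale = max_size / max(newh, neww)
        newh, neww = newh * scale, neww * scale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(shortest_edge_resize(480, 640))  # (288, 384)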
| 344 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowercase__ :
def __init__( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any]=13 ,lowerCamelCase__ : Dict=10 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : List[Any]=2 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : Tuple=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Union[str, Any]=10 ,lowerCamelCase__ : Any=0.0_2 ,lowerCamelCase__ : int=0.9 ,lowerCamelCase__ : Optional[int]=None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Any = patch_size
_UpperCamelCase : str = tubelet_size
_UpperCamelCase : Dict = num_frames
_UpperCamelCase : str = is_training
_UpperCamelCase : str = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Any = type_sequence_label_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[str] = mask_ratio
_UpperCamelCase : Any = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_UpperCamelCase : int = (image_size // patch_size) ** 2
_UpperCamelCase : int = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_UpperCamelCase : List[Any] = int(mask_ratio * self.seq_length )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = None
if self.use_labels:
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : List[str] = VideoMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = VideoMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCamelCase : Union[str, Any] = torch.ones((self.num_masks,) )
_UpperCamelCase : Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_UpperCamelCase : Optional[int] = mask.expand(self.batch_size ,-1 ).bool()
_UpperCamelCase : List[str] = model(lowerCamelCase__ ,lowerCamelCase__ )
# model only returns predictions for masked patches
_UpperCamelCase : Optional[int] = mask.sum().item()
_UpperCamelCase : List[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : str = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( lowercase , lowercase , unittest.TestCase ):
lowercase__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase__ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = VideoMAEModelTester(self )
_UpperCamelCase : str = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
_UpperCamelCase : List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCamelCase : List[Any] = torch.ones((self.model_tester.num_masks,) )
_UpperCamelCase : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_UpperCamelCase : List[Any] = mask.expand(self.model_tester.batch_size ,-1 ).bool()
_UpperCamelCase : int = bool_masked_pos.to(lowerCamelCase__ )
if return_labels:
if model_class in [
*get_values(lowerCamelCase__ ),
]:
_UpperCamelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__ )
return inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = model_class(lowerCamelCase__ )
_UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Any = [*signature.parameters.keys()]
_UpperCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = VideoMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = True
for model_class in self.all_model_classes:
_UpperCamelCase : int = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCamelCase : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = True
_UpperCamelCase : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Any = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
_UpperCamelCase : int = len(lowerCamelCase__ )
# Check attention is always last and order is fine
_UpperCamelCase : Tuple = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(out_len + 1 ,len(lowerCamelCase__ ) )
_UpperCamelCase : List[str] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ):
_UpperCamelCase : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Dict = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Optional[int] = outputs.hidden_states
_UpperCamelCase : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
_UpperCamelCase : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCamelCase : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : str = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[Any] = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def A__ ( ):
_UpperCamelCase : List[Any] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_UpperCamelCase : Any = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : int = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : int = prepare_video()
_UpperCamelCase : int = image_processor(lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**lowerCamelCase__ )
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(lowerCamelCase__ )
_UpperCamelCase : List[str] = self.default_image_processor
_UpperCamelCase : str = prepare_video()
_UpperCamelCase : int = image_processor(lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# add boolean mask, indicating which patches to mask
_UpperCamelCase : Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' )
_UpperCamelCase : Optional[Any] = torch.load(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(**lowerCamelCase__ )
# verify the logits
_UpperCamelCase : Tuple = torch.Size([1, 1408, 1536] )
_UpperCamelCase : Optional[int] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ,device=lowerCamelCase__ )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_UpperCamelCase : List[Any] = torch.tensor([0.5_1_4_2] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_UpperCamelCase : Optional[Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ,norm_pix_loss=lowerCamelCase__ ).to(
lowerCamelCase__ )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**lowerCamelCase__ )
        _UpperCamelCase : Optional[Any] = torch.tensor([0.6_4_6_9] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4 ) )
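# The boolean-mask construction used repeatedly in the tests above, as a
# standalone sketch: one shared mask with `num_masks` leading ones, expanded
# to every example in the batch.
import torch

seq_length, num_masks, batch_size = 8, 5, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape)        # torch.Size([2, 8])
print(bool_masked_pos.sum(dim=1))   # tensor([5, 5]) masked patches per video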
| 83 |
'''simple docstring'''
def _print_dist(dist, v):
    """simple docstring"""
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("""inf"""):
                print(int(dist[i][j]), end="""\t""")
            else:
                print("""INF""", end="""\t""")
        print()


def floyd_warshall(graph, v):
    """simple docstring"""
    dist = [[float("""inf""") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""")
                    and dist[k][j] != float("""inf""")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are 0-indexed vertices that must lie within range [0, v);
    # failure to follow this will result in an IndexError
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
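# The same run, non-interactively; the graph entries below match the
# commented example above and reuse floyd_warshall as defined in this file:
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],  # edge 1 -> 2, weight 2
    [INF, 1.0, 0.0],  # edge 2 -> 1, weight 1
]
floyd_warshall(example_graph, 3)  # prints the matrix shown above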
| 344 | 0 |
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    '''simple docstring'''
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    '''simple docstring'''
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
| 344 | 0 |
'''simple docstring'''
import string
def atbash_slow(sequence: str):
    '''simple docstring'''
    output = ""
    for i in sequence:
        extract = ord(i)
        if 6_5 <= extract <= 9_0:
            output += chr(1_5_5 - extract)
        elif 9_7 <= extract <= 1_2_2:
            output += chr(2_1_9 - extract)
        else:
            output += i
    return output


def atbash(sequence: str):
    '''simple docstring'''
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark():
    '''simple docstring'''
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'> atbash_slow(): {timeit("atbash_slow(printable)", setup=setup)} seconds')
    print(f'> atbash(): {timeit("atbash(printable)", setup=setup)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
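# Atbash is an involution, so encoding twice restores the input; the two
# implementations above also agree with each other:
message = "The quick brown fox!"
assert atbash(atbash(message)) == message
assert atbash_slow(message) == atbash(message)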
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
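# A minimal sketch of the deferred-import idea behind _LazyModule, using the
# standard module-level __getattr__ hook (PEP 562) instead of the
# transformers-internal helper; the module and class names are placeholders.
# lazy_pkg/__init__.py:
import importlib

_import_structure = {"audio": ["AudioModel"], "text": ["TextModel"]}
_attr_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    # Import the owning submodule only on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")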
| 344 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'decord' )
self.check_model_type(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowerCAmelCase : Union[str, Any] = {}
if frame_sampling_rate is not None:
__lowerCAmelCase : Optional[int] = frame_sampling_rate
if num_frames is not None:
__lowerCAmelCase : int = num_frames
__lowerCAmelCase : Any = {}
if top_k is not None:
__lowerCAmelCase : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ):
if num_frames is None:
__lowerCAmelCase : Union[str, Any] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
__lowerCAmelCase : Tuple = BytesIO(requests.get(_SCREAMING_SNAKE_CASE ).content )
__lowerCAmelCase : str = VideoReader(_SCREAMING_SNAKE_CASE )
videoreader.seek(0 )
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : str = num_frames * frame_sampling_rate - 1
__lowerCAmelCase : Union[str, Any] = np.linspace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num=_SCREAMING_SNAKE_CASE , dtype=np.intaa )
__lowerCAmelCase : int = videoreader.get_batch(_SCREAMING_SNAKE_CASE ).asnumpy()
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
return model_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ):
if top_k > self.model.config.num_labels:
__lowerCAmelCase : List[str] = self.model.config.num_labels
if self.framework == "pt":
__lowerCAmelCase : Union[str, Any] = model_outputs.logits.softmax(-1 )[0]
__lowerCAmelCase , __lowerCAmelCase : Any = probs.topk(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__lowerCAmelCase : Any = scores.tolist()
__lowerCAmelCase : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] | 86 |
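# Typical usage of the pipeline defined above (requires torch and decord, per
# the availability checks; the video path below is a placeholder):
from transformers import pipeline

classifier = pipeline("video-classification")
outputs = classifier("path/to/video.mp4", top_k=3, num_frames=16)
print(outputs)  # [{"score": ..., "label": ...}, ...]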
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
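# In upstream transformers the classmethod above is exposed as
# VisionEncoderDecoderConfig.from_encoder_decoder_configs; composing a config
# from two sub-configs looks like this:
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.model_type)                                              # vision-encoder-decoder
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True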
| 344 | 0 |
import math
def lowercase_ ( _lowerCamelCase : int):
return math.sqrt(_lowerCamelCase) * math.sqrt(_lowerCamelCase) == num
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : List[Any] = 0
lowercase__ : Tuple = n
while left <= right:
lowercase__ : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowercase__ : Any = mid - 1
else:
lowercase__ : Union[str, Any] = mid + 1
return False
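# A minimal sketch exercising the binary-search check above (note: this second `lowercase_`
# definition shadows the earlier float-based one at module scope):
def _demo_perfect_square_check() -> None:
    for value in (0, 1, 2, 15, 16, 25, 26):
        print(value, lowercase_(value))  # e.g. "16 True", "15 False"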
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
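# These tests are collected by pytest; a typical local invocation (hypothetical file name):
#
#   pytest -q tests/test_file_utils.py -k "offline or cached_path"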
| 344 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
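# A minimal usage sketch (assumption: this file is `transformers/models/gpt_neox/__init__.py`,
# so attribute access resolves lazily through the `_LazyModule` above):
#
#   from transformers.models.gpt_neox import GPTNeoXConfig
#   config = GPTNeoXConfig()  # first access triggers the import of `configuration_gpt_neox`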
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__lowerCAmelCase = {'''allegro/herbert-base-cased''': 514}
__lowerCAmelCase = {}
class __magic_name__ ( PreTrainedTokenizerFast ):
lowerCAmelCase : str = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Any = HerbertTokenizer
def __init__( self : Dict ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : int=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Optional[int]="<s>" ,_UpperCAmelCase : str="<unk>" ,_UpperCAmelCase : Dict="<pad>" ,_UpperCAmelCase : List[Any]="<mask>" ,_UpperCAmelCase : Optional[int]="</s>" ,**_UpperCAmelCase : Union[str, Any] ,):
super().__init__(
_UpperCAmelCase ,_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : str ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
_a : List[Any] = [self.cls_token_id]
_a : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
def __lowercase ( self : int ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
_a : Tuple = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
_a : Union[str, Any] = self._tokenizer.model.save(_UpperCAmelCase ,name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
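# A minimal usage sketch (assumption: the class above mirrors transformers'
# `HerbertTokenizerFast`; the checkpoint name comes from the pretrained maps above):
#
#   from transformers import HerbertTokenizerFast
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer("Kot siedzi na macie.")["input_ids"]  # <s> ... </s> specials added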
| 89 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase ( OnnxConfig ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
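# A minimal usage sketch (assumption: the classes above are transformers' `DistilBertConfig`
# and `DistilBertOnnxConfig`, imported here by their public names):
def _sketch_distilbert_onnx_axes():
    from transformers.models.distilbert.configuration_distilbert import (
        DistilBertConfig,
        DistilBertOnnxConfig,
    )

    onnx_config = DistilBertOnnxConfig(DistilBertConfig())
    # For the default task, `input_ids` and `attention_mask` share dynamic batch/sequence axes.
    return onnx_config.inputs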
| 344 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 20
__lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase__ )
# tweak scores to not be uniform anymore
__lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCamelCase = jax.nn.softmax(lowerCamelCase__ , axis=-1 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create ramp distribution
__lowerCamelCase = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCamelCase = 5
__lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCamelCase = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, length) ).copy()
__lowerCamelCase = top_k_warp_safety_check(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCamelCase = np.exp(top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__lowerCamelCase = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCamelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCamelCase = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__ )
# check that min length is applied at length 5
__lowerCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCamelCase = 5
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = 15
__lowerCamelCase = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the bos_token_id score
__lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCamelCase = 1
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = 5
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCamelCase = 4
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , lowerCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = 10
# no processor list
__lowerCamelCase = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# with processor list
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , lowerCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = 10
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
__lowerCamelCase = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
return scores
__lowerCamelCase = jax.jit(lowerCamelCase__ )
__lowerCamelCase = jax.jit(lowerCamelCase__ )
__lowerCamelCase = jitted_run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = jitted_run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
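# A minimal composition sketch (assumption: mirrors the processor-list pattern tested above):
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.95)]
#   )
#   scores = processors(input_ids, scores, cur_len=cur_len)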
| 90 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
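        # (the model's hidden size is 256, so query/key/value are consecutive 256-row slices)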
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    A_ : List[Any] = Image.open(requests.get(a_ , stream=True ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
    A_ : str = {int(k ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
    A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=True ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(a_ ).mkdir(exist_ok=True )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
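# A minimal invocation sketch (hypothetical script name; the arguments are defined above):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50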
| 344 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCAmelCase__ ( Pipeline ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : List[Any]):
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : Optional[int]=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : Tuple = {}
if prompt is not None:
SCREAMING_SNAKE_CASE_ : Tuple = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE_ : int = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Any):
'''simple docstring'''
return super().__call__(lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str]=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = load_image(lowercase_)
if prompt is not None:
if not isinstance(lowercase_ , lowercase_):
raise ValueError(
F'Received an invalid text input, got - {type(lowercase_)} - but expected a single string. '
'''Note also that one single text can be provided for conditional image to text generation.''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_).input_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(lowercase_).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE_ : str = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE_ : int = self.image_processor(images=lowercase_ , return_tensors=self.framework)
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , return_tensors=self.framework)
model_inputs.update(lowercase_)
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation')
else:
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE_ : List[str] = None
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int]=None):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , lowercase_)
and all(x is None for x in model_inputs['''input_ids'''])
):
SCREAMING_SNAKE_CASE_ : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE_ : int = model_inputs.pop(self.model.main_input_name)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.generate(lowercase_ , **lowercase_ , **lowercase_)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''generated_text''': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_)
return records
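# A minimal usage sketch (assumption: the class above backs the public "image-to-text"
# pipeline task, reached through the `pipeline` factory):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg", max_new_tokens=20)
#   # -> [{'generated_text': '...'}]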
| 91 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SchedulerCommonTest ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
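# A minimal usage sketch (assumption: `UnCLIPScheduler` is diffusers' public scheduler
# exercised by the tests above):
def _sketch_unclip_scheduler():
    from diffusers import UnCLIPScheduler

    scheduler = UnCLIPScheduler(variance_type="fixed_small_log", clip_sample=True)
    scheduler.set_timesteps(25)
    return scheduler.timesteps  # 25 inference timesteps, in descending order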
| 344 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_A ,
            initializer_range=self.initializer_range ,
            encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForMaskedImageModeling(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_a : int = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a : Optional[Any] = False
_a : Tuple = False
_a : Tuple = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase = False
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowerCAmelCase = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
*get_values(_A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
__lowerCAmelCase = problem_type["title"]
__lowerCAmelCase = problem_type["num_labels"]
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
__lowerCAmelCase = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _a ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_A )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**_A )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
__lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" )
__lowerCAmelCase = inputs.pixel_values.to(_A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowerCAmelCase = model(_A )
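# --- Illustrative sketch (added; not part of the original test file) ---
# The fp16 test above relies on `accelerate` for device placement. The same
# inference pattern as a standalone snippet, assuming a CUDA machine:
#
#   model = DeiTModel.from_pretrained(
#       "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
#   )
#   image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
#   with torch.no_grad():
#       outputs = model(pixel_values.to(model.device, model.dtype))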
| 92 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    """Return one ADE20k test image and its segmentation map."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Return a batch of two ADE20k test images and their segmentation maps."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
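# --- Illustrative note (added; not part of the original test file) ---
# `do_reduce_labels` follows the usual ADE20k convention: background class 0 is
# remapped to the ignore index 255 and all remaining labels shift down by one.
# A minimal numpy sketch of that transform:
#
#   seg = np.array([[0, 1], [150, 2]])
#   reduced = np.where(seg == 0, 255, seg - 1)   # -> [[255, 0], [149, 1]]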
| 344 | 0 |
"""Mosaic data augmentation for object-detection datasets with YOLO-format labels."""

import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from the input dir, build mosaic
    images with updated annotations, and save both to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and YOLO annotations, converting each normalized
    (x_center, y_center, width, height) box to (xmin, ymin, xmax, ymax)."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[np.ndarray, list, str]:
    """Stitch four images into one mosaic and remap their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[
                divid_point_y : output_size[0], divid_point_x : output_size[1], :
            ] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 93 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras_nlp's BytePairTokenizer."""

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 0 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if indicated).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 94 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
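# --- Illustrative usage sketch (added; not part of the original module) ---
# Typical object-detection usage once the lazy module is resolved:
#
#   from transformers import YolosImageProcessor, YolosForObjectDetection
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
#   model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
#   inputs = processor(images=image, return_tensors="pt")   # `image` is any PIL image
#   outputs = model(**inputs)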
| 344 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    # Wrapper that exposes timm models through the transformers backbone API.
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
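# --- Illustrative usage sketch (added; not part of the original module) ---
# Builds a randomly initialised timm ResNet backbone and extracts feature maps
# from a dummy batch (the config values here are assumptions for the example):
#
#   import torch
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   print([tuple(fm.shape) for fm in feature_maps])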
| 95 |
"""A greedy heuristic for a knapsack-style menu-selection problem."""


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in descending key order while they still fit the budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Original doctest omitted in this copy; see the usage sketch below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
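# --- Illustrative usage sketch (added; the data below is made up for the example) ---
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 10, 20]
#   foods = build_menu(food, value, weight)
#   chosen, total_value = greedy(foods, 25, Things.get_value)
#   # -> ([Things(Pizza, 100, 10)], 100.0): Pizza fits the 25-unit budget,
#   #    Burger (weight 40) and Coca Cola (20 more on top of 10) do not.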
| 344 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
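    # Illustrative invocation (added example; file paths are hypothetical):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin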
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 96 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency f = 1 / (2 * pi * sqrt(L * C)) of an LC circuit.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
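# Illustrative worked example (added): with L = 10 mH and C = 5 uF,
# f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(10e-3 * 5e-6)) ~ 711.76 Hz:
#
#   resonant_frequency(inductance=10e-3, capacitance=5e-6)
#   # -> ('Resonant frequency', 711.76...)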
| 344 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original checkpoint's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    filepath = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(filepath).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
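    # Illustrative invocation (added example; the script filename is assumed):
    #   python convert_table_transformer_checkpoint.py \
    #       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #       --pytorch_dump_folder_path ./table-transformer-detection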
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 97 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 344 | 0 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return the frequency of every possible dice total."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            # NOTE: `None` as the target restores the raw state dict; this mirrors
            # the diffusers-style loader this function appears to be derived from.
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model( pt_model , flax_state ) -> Any:
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
        logger.warning(
            F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""" )
    return pt_model
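# Minimal round-trip sketch (added for illustration, not part of the original
# module): feed a tiny Flax-style parameter tree through the converter above.
# ``_TinyModel`` is a stand-in, not a real diffusers class. Flax stores Linear
# kernels as (in_features, out_features), so the converter transposes them into
# PyTorch's (out_features, in_features).
def _demo_flax_to_pt():
    import torch
    from torch import nn

    class _TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.dense = nn.Linear(3 , 2 )

    flax_state = {
        """dense""": {
            """kernel""": np.zeros((3, 2) , dtype=np.float32 ),
            """bias""": np.zeros((2,) , dtype=np.float32 ),
        }
    }
    pt_model = load_flax_weights_in_pytorch_model(_TinyModel() , flax_state )
    # the (3, 2) kernel arrives as the (2, 3) Linear weight
    assert pt_model.dense.weight.shape == (2, 3)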
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
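# Lazy-import bookkeeping: the package advertises these names up front, but the
# heavyweight torch-backed module is only imported the first time one of them is
# accessed (see the _LazyModule hookup at the bottom of this file).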
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ) -> List[str]:
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCAmelCase_ ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase_ ( self ) -> Dict:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def UpperCAmelCase_ ( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 344 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__magic_name__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__magic_name__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
__magic_name__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self):
if version.parse(scb.__version__) < version.parse("""1.4.12"""):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        # transpose per-prediction references into sacrebleu's per-stream layout
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 100 |
'''simple docstring'''
def solution( n = 1_0_0 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
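# A quick brute-force cross-check of the closed-form identity above (added
# sketch, not part of the original solution): both paths agree for small n.
def solution_brute_force( n = 1_0_0 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ))
    square_of_sum = sum(range(1 , n + 1 )) ** 2
    return square_of_sum - sum_of_squares

assert solution_brute_force(10 ) == solution(10 ) == 2_640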
if __name__ == "__main__":
print(f'{solution() = }')
| 344 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    '''simple docstring'''
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands( number_of_hands = 100 ):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight( hand , expected , card_values ):
    '''simple docstring'''
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_compare_simple( hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_compare_random( hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    '''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    '''simple docstring'''
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    '''simple docstring'''
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    '''simple docstring'''
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
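# Same lazy-import pattern used across the library: the mapping below is handed
# to _LazyModule at import time, while the TYPE_CHECKING branch gives static
# type checkers the real symbols without paying the runtime import cost.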
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether ot not to use whole word mask.'} )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ) ->Any:
    """simple docstring"""
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
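# Illustrative note (added): with --train_data_files "shards/*.txt", every glob
# match above becomes its own (LineByLine)TextDataset and the pieces are
# stitched together with torch.utils.data.ConcatDataset, so each shard is
# tokenized and cached independently.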
def main() ->List[Any]:
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it,and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index ) ->Tuple:
    """For xla_spawn (TPUs)."""
    main()
if __name__ == "__main__":
main()
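# Example invocation (added for illustration; flag names mirror the dataclass
# fields defined above plus standard TrainingArguments flags):
#   python run_language_modeling.py \
#       --model_name_or_path bert-base-uncased \
#       --train_data_file train.txt --eval_data_file eval.txt \
#       --mlm --do_train --do_eval \
#       --output_dir ./lm_out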
| 102 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein ) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 1_4 )
    restype_atom37_to_atom14_list.append([0] * 3_7 )
    restype_atom14_mask_list.append([0.0] * 1_4 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([2_1, 3_7] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein
def make_atom14_masks_np( batch ) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
return out
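# Minimal usage sketch (added; shapes assumed, indices here are stand-ins rather
# than real per-residue mappings): the gather indices built above pull the
# sparse 37-atom representation down to the dense 14-atom layout.
def _demo_atom37_to_atom14():
    num_res = 8
    atom37_positions = torch.randn(num_res , 37 , 3 )
    residx_atom14_to_atom37 = torch.zeros(num_res , 14 , dtype=torch.long )  # stand-in indices
    atom14_positions = torch.gather(
        atom37_positions , 1 , residx_atom14_to_atom37[..., None].expand(-1 , -1 , 3 ) )
    assert atom14_positions.shape == (num_res, 14, 3)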
| 344 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , '''tf_padding'''))
        self.parent.assertTrue(hasattr(config , '''depth_multiplier'''))
class MobileNetVaModelTester:
def __init__( self : List[str] , A_ : Any , A_ : Tuple=1_3 , A_ : Tuple=3 , A_ : Tuple=3_2 , A_ : List[str]=0.25 , A_ : Dict=8 , A_ : Optional[Any]=8 , A_ : int=6 , A_ : Tuple=3_2 , A_ : Union[str, Any]=True , A_ : Optional[int]=True , A_ : Optional[Any]=True , A_ : Tuple="relu6" , A_ : Union[str, Any]=1_2_8_0 , A_ : List[str]=0.1 , A_ : List[Any]=0.02 , A_ : Optional[int]=True , A_ : Union[str, Any]=True , A_ : List[Any]=1_0 , A_ : Tuple=None , ):
lowerCAmelCase_ : List[str] = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Optional[int] = image_size
lowerCAmelCase_ : Union[str, Any] = depth_multiplier
lowerCAmelCase_ : List[str] = depth_divisible_by
lowerCAmelCase_ : List[Any] = min_depth
lowerCAmelCase_ : str = expand_ratio
lowerCAmelCase_ : str = tf_padding
lowerCAmelCase_ : str = output_stride
lowerCAmelCase_ : Optional[int] = first_layer_is_expansion
lowerCAmelCase_ : Optional[Any] = finegrained_output
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : List[str] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
lowerCAmelCase_ : Any = classifier_dropout_prob
lowerCAmelCase_ : List[Any] = use_labels
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : List[str] = num_labels
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = scope
def UpperCAmelCase__ ( self : List[str]):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        config = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : Dict):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , A_ : Any , A_ : List[Any] , A_ : List[str] , A_ : Tuple):
lowerCAmelCase_ : int = MobileNetVaModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : str = model(A_)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any]):
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : List[str] = MobileNetVaForImageClassification(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[int] = model(A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : int , A_ : Tuple , A_ : Optional[int]):
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : Optional[int] = MobileNetVaForSemanticSegmentation(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : str = model(A_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase_ : Union[str, Any] = model(A_ , labels=A_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : int):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_a = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def UpperCAmelCase__ ( self : Optional[Any]):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False)
def UpperCAmelCase__ ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''')
def UpperCAmelCase__ ( self : List[str]):
pass
def UpperCAmelCase__ ( self : int):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def UpperCAmelCase__ ( self : int):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCAmelCase__ ( self : int):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 1_6
            self.assertEqual(len(hidden_states) , expected_num_stages)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
def UpperCAmelCase__ ( self : str):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def UpperCAmelCase__ ( self : Union[str, Any]):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def UpperCAmelCase__ ( self : List[str]):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCamelCase( ):
lowerCAmelCase_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any]):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''') if is_vision_available() else None
)
@slow
def UpperCAmelCase__ ( self : Dict):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
def UpperCAmelCase__ ( self : Union[str, Any]):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 2_1, 6_5, 6_5))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4))
| 103 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 344 | 0 |
'''simple docstring'''
def check_bouncy(n):
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent=99):
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(99)}')
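# Illustrative sanity checks (added sketch, not part of the original solution):
# a number is bouncy when its digits are neither fully ascending nor fully descending.
assert check_bouncy(538)  # 5 > 3 < 8: neither ascending nor descending
assert not check_bouncy(1234)  # digits sorted ascending
assert not check_bouncy(4321)  # digits sorted descending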
| 104 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
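    # Worked example for the single-image branch above (illustrative): with
    # size["shortest_edge"]=288 and size_divisor=32, a 480x640 (h x w) image gives
    # scale = 288 / 480 = 0.6 -> (288, 384); max_size = int(1333 / 800 * 288) = 479
    # triggers no second rescale, and both sides are already multiples of 32, so the
    # expected (height, width) is (288, 384).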
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
a : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
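# Illustrative walk through the mapping above (added note): a fairseq weight named
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" key, the
# "*" wildcard is replaced by the layer index parsed out of the name, and the tensor
# is copied to "encoder.layers.3.attention.k_proj.weight" on the Hugging Face side.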
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a : Optional[Any] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
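# Illustrative invocation (assumption: this script is saved locally as convert_hubert.py;
# the checkpoint and output paths below are placeholders, not real files):
#
#     python convert_hubert.py \
#         --checkpoint_path ./hubert_base_ls960.pt \
#         --pytorch_dump_folder_path ./hubert-base-hf \
#         --not_finetuned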
| 105 |
'''simple docstring'''
def _print_dist(dist, v):
    """simple docstring"""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """simple docstring"""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
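# Non-interactive sketch of the same example (illustrative; bypasses the input()
# prompts in the __main__ block above):
#
#     INF = float("inf")
#     demo = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#     floyd_warshall(demo, 3)  # prints the expected matrix shown above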
| 344 | 0 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
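# Illustrative round trip (added sketch): decrypting with the encryption key recovers
# the plaintext, and bruteforce finds it among its guesses.
assert decrypt(encrypt("WEAREDISCOVERED", 4), 4) == "WEAREDISCOVERED"
assert "WEAREDISCOVERED" in bruteforce(encrypt("WEAREDISCOVERED", 4)).values()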
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 344 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def __UpperCAmelCase ( self : int ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
return
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
pass
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ):
a = model_class(__lowerCamelCase )
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> str:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
a = model_class(__lowerCamelCase )
@jax.jit
def model_jitted(__lowerCamelCase : List[str] , **__lowerCamelCase : List[Any] ):
return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
with self.subTest("JIT Enabled" ):
a = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
a = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
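                # Note (added): jax.disable_jit() runs the same traced function eagerly,
                # so comparing the two branches checks that JIT compilation does not
                # change the number or shapes of the outputs.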
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : Any ) -> List[str]:
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="np" )
a = model(**__lowerCamelCase )
# verify the logits
a = (1, 10_00)
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 107 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
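    # Illustrative effect of the lazy pattern above (added note): the first attribute
    # access, e.g. `transformers.models.data2vec.Data2VecTextConfig`, makes _LazyModule
    # import the owning submodule listed in _import_structure; nothing heavy is loaded
    # at package-import time.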
| 344 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : Optional[Any] = "lower newer"
lowerCAmelCase : str = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(snake_case__ ) # , add_prefix_space=True)
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=snake_case__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=snake_case__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
lowerCAmelCase : int = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
lowerCAmelCase : str = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
lowerCAmelCase : Any = tokenizer.encode(
"sequence builders" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : int = "Encode this sequence."
lowerCAmelCase : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(snake_case__ , snake_case__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowerCAmelCase : Dict = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(snake_case__ , snake_case__ )
# Testing spaces after special tokens
lowerCAmelCase : Optional[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ )} ) # mask token has a left space
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(snake_case__ )
lowerCAmelCase : Union[str, Any] = "Encode <mask> sequence"
lowerCAmelCase : Union[str, Any] = "Encode <mask>sequence"
lowerCAmelCase : str = tokenizer.encode(snake_case__ )
lowerCAmelCase : Any = encoded.index(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.encode(snake_case__ )
lowerCAmelCase : Tuple = encoded.index(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
lowerCAmelCase : Union[str, Any] = "A, <mask> AllenNLP sentence."
lowerCAmelCase : List[Any] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
lowerCAmelCase : Dict = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowerCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
snake_case__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def lowercase__ ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , snake_case__ )
self.assertEqual(post_processor_state["add_prefix_space"] , snake_case__ )
self.assertEqual(post_processor_state["trim_offsets"] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase : Optional[int] = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Any = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : List[Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Any = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , )
lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ )
lowerCAmelCase : Union[str, Any] = tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , )
| 108 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''vision-encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs ) -> None:
        super().__init__(**kwargs )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F"A configuration of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
    def to_dict( self ) -> Any:
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> None:
pass
    def get_encoder_config( self , encoder_config ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config , decoder_config , feature = "default" ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
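# Hedged usage sketch for the config class above: the public transformers API can
# compose it from any encoder/decoder pair (ViT + GPT-2 are illustrative choices).
from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.model_type)  # "vision-encoder-decoder"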
| 344 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: List[Any] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig( PretrainedConfig ):
    model_type = 'xmod'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
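# Illustrative construction of the config defined above; XmodConfig is the public
# transformers name, and the language/adapter values here are arbitrary examples.
from transformers import XmodConfig

config = XmodConfig(languages=["en_XX", "de_DE"], default_language="en_XX")
print(config.adapter_reduction_factor, config.languages)  # 2 ['en_XX', 'de_DE']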
| 109 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="""session""" )
def zstd_path( tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
    data = bytes(FILE_CONTENT , """utf-8""" )
    with zstd.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    """simple docstring"""
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , """w""" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    """simple docstring"""
    input_paths = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / """cache"""
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    """simple docstring"""
    custom_cache_dir = """custom_cache"""
    custom_extracted_dir = """custom_extracted_dir"""
    custom_extracted_path = tmp_path / """custom_extracted_path"""
    if default_extracted:
        expected = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
    else:
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , custom_extracted_dir )
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    """simple docstring"""
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def test_cached_path_missing_local( tmp_path ):
    """simple docstring"""
    missing_file = str(tmp_path.resolve() / """__missing_file__.txt""" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = """./__missing_file__.txt"""
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    """simple docstring"""
    output_file = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def test_cached_path_offline():
    """simple docstring"""
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def test_http_offline( tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("""https://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def test_ftp_offline( tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("""ftp://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , True )
def test_fsspec_offline( tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("""data""" ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("""s3://huggingface.co""" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("""s3://huggingface.co""" )
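# A hedged sketch of the API exercised by these tests: cached_path resolves a
# local path, URL, or compressed archive to a concrete local file. The file
# names below are placeholders, not fixtures shipped with this module.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

local = cached_path("some_local_file.txt")  # local paths pass through if they exist
extracted = cached_path(
    "archive.txt.gz", download_config=DownloadConfig(extract_compressed_file=True)
)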
| 344 | 0 |
def prime_sieve_eratosthenes( num: int ):
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p, starting at p*p, as composite
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
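    # Hedged sanity checks for the sieve (standard known values):
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
    assert prime_sieve_eratosthenes(2) == [2]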
| 103 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
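# Hedged usage sketch: the _LazyModule indirection above defers the heavy imports
# until first attribute access. Requires sentencepiece/tokenizers to be installed;
# the checkpoint name is illustrative.
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")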
| 344 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_lowerCamelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
SCREAMING_SNAKE_CASE =CLIPTextModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.get_dummy_components()
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE =np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.get_dummy_components()
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_lowerCamelCase )
SCREAMING_SNAKE_CASE ="""french fries"""
SCREAMING_SNAKE_CASE =sd_pipe(**_lowerCamelCase ,negative_prompt=_lowerCamelCase )
SCREAMING_SNAKE_CASE =output.images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE =np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.get_dummy_components()
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_lowerCamelCase )
SCREAMING_SNAKE_CASE =[inputs["""prompt"""]] * 2
SCREAMING_SNAKE_CASE =np.array(inputs['image'] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE =torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE =image / 2 + 0.5
SCREAMING_SNAKE_CASE =image.permute(0 ,3 ,1 ,2 )
SCREAMING_SNAKE_CASE =image.repeat(2 ,1 ,1 ,1 )
SCREAMING_SNAKE_CASE =sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE =np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.get_dummy_components()
SCREAMING_SNAKE_CASE =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule='scaled_linear' )
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_lowerCamelCase )
SCREAMING_SNAKE_CASE =sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE =[round(_lowerCamelCase ,4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(_lowerCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE =np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.get_dummy_components()
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =VaeImageProcessor(do_resize=_lowerCamelCase ,do_normalize=_lowerCamelCase )
SCREAMING_SNAKE_CASE =pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs_by_type(_lowerCamelCase ,input_image_type='pt' ) )[0]
SCREAMING_SNAKE_CASE =components["""vae"""]
SCREAMING_SNAKE_CASE =self.get_dummy_inputs_by_type(_lowerCamelCase ,input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE =vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase )[0]
SCREAMING_SNAKE_CASE =np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCamelCase ,1e-4 ,'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self ,seed=0 ):
        generator = torch.manual_seed(seed )
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
        inputs = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE =self.get_inputs()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE =np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_lowerCamelCase )
SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE =self.get_inputs()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE =np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_lowerCamelCase )
SCREAMING_SNAKE_CASE =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE =self.get_inputs()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE =np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCAmelCase ( self : Optional[int] ):
        number_of_steps = 0
        def callback_fn(step: int ,timestep: int ,latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_lowerCamelCase ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE =pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE =self.get_inputs()
pipe(**_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowerCAmelCase ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_lowerCamelCase ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE =pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE =self.get_inputs()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE =inputs["""image"""].resize((504, 504) )
SCREAMING_SNAKE_CASE ="""timbrooks/instruct-pix2pix"""
SCREAMING_SNAKE_CASE =StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCamelCase ,safety_checker=_lowerCamelCase ,)
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE =pipe(**_lowerCamelCase )
SCREAMING_SNAKE_CASE =output.images[0]
SCREAMING_SNAKE_CASE =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE =np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
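# A hedged end-to-end sketch of the pipeline exercised by the tests above. The
# public diffusers class is StableDiffusionInstructPix2PixPipeline (this file's
# mangled spelling aside); assumes network access to the timbrooks/instruct-pix2pix
# checkpoint and a CUDA device.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=20).images[0]
edited.save("cyborg.png")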
| 334 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
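# Hedged sketch tying the two classes above together under their public names
# (DistilBertConfig / DistilBertOnnxConfig); the hyperparameter values are arbitrary.
from transformers import DistilBertConfig
from transformers.models.distilbert.configuration_distilbert import DistilBertOnnxConfig

config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)
onnx_config = DistilBertOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes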
| 344 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data( subreddit: str , limit: int = 1 , age: str = "new" , wanted_data: list | None = None ) -> dict:
    '''simple docstring'''
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg )
    response = requests.get(
        F'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={'User-agent': 'A random string'} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 295 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = """conditional_detr.""" + src
        rename_key(state_dict , src , dest )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
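    # Example invocation (hedged: the script file name below is hypothetical, and
    # torch.hub must be able to reach the DeppMeng/ConditionalDETR repo):
    #   python convert_conditional_detr_checkpoint.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional_detr_resnet50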
| 344 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'digital_image_processing/image_data/lena_small.jpg')
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray_img , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    '''simple docstring'''
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray_img , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray_img , 3 ).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray_img )
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any()
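# These checks are plain pytest functions; a hedged way to run just this module
# (the path is hypothetical and depends on where the repository keeps this file):
#   python -m pytest digital_image_processing/test_digital_image_processing.py -q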
| 3 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**kwargs )
        return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
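# Hedged sketch of the scheduler API exercised above; UnCLIPScheduler is public
# in diffusers and the constructor arguments mirror the test config.
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
print(scheduler.timesteps[:5])  # first few discrete timesteps of the 25-step schedule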
| 344 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__UpperCamelCase : Union[str, Any] = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    # creates a nested list of random floats with the given 2-D shape
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a = [np.asarray(_lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
a = feature_extractor(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
a = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
a = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# Test batched
a = feature_extractor(_lowerCamelCase , return_tensors="""np""" ).input_features
a = feature_extractor(_lowerCamelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a = np.asarray(_lowerCamelCase )
a = feature_extractor(_lowerCamelCase , return_tensors="""np""" ).input_features
a = feature_extractor(_lowerCamelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = ["""longest""", """max_length""", """do_not_pad"""]
a = [None, 16, None]
for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ):
a = feature_extractor(
_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_attention_mask=_lowerCamelCase )
a = inputs.input_features
a = inputs.attention_mask
a = [np.sum(_lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = ["""longest""", """max_length""", """do_not_pad"""]
a = [None, 16, None]
for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ):
a = feature_extractor(
_lowerCamelCase , max_length=_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""np""" , return_attention_mask=_lowerCamelCase )
a = inputs.input_features
a = inputs.attention_mask
a = [np.sum(_lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
_lowerCamelCase , padding="""max_length""" , max_length=4 , truncation=_lowerCamelCase , return_tensors="""np""" , return_attention_mask=_lowerCamelCase , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
_lowerCamelCase , padding="""longest""" , max_length=4 , truncation=_lowerCamelCase , return_tensors="""np""" , return_attention_mask=_lowerCamelCase , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
_lowerCamelCase , padding="""longest""" , max_length=16 , truncation=_lowerCamelCase , return_tensors="""np""" , return_attention_mask=_lowerCamelCase , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
import torch
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = np.random.rand(100 , 32 ).astype(np.floataa )
a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
a = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
a = self._load_datasamples(1 )
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = feature_extractor(_lowerCamelCase , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCamelCase , atol=1E-4 ) )
| 228 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(dataset[0]["""file"""] )
A_ : Dict = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(ds[0]["""file"""] )
A_ : List[Any] = Image.open(ds[1]["""file"""] )
A_ : Any = Image.open(ds[2]["""file"""] )
A_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results ( result , args ):
    log_outputs = args.log_outputs
    dataset_id = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("""wer""" )
    cer = load_metric("""cer""" )
    # compute metrics
    wer_result = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    cer_result = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""" , """w""" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file , """w""" ) as p, open(target_file , """w""" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"""{i}""" + """\n""" )
                p.write(batch["""prediction"""] + """\n""" )
                t.write(f"""{i}""" + """\n""" )
                t.write(batch["""target"""] + """\n""" )
            result.map(write_to_file , with_indices=True )
def normalize_text ( text ):
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , """""" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["""\n\n""", """\n""", """   """, """  """]
    for t in token_sequences_to_ignore:
        text = """ """.join(text.split(t ) )
    return text
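# Quick check of the normalization above (a sketch; the sample sentence is an
# assumption, not from the original script):
#
#   normalize_text("Hello, World!\nSecond line.")
#   # -> "hello world second line"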
def main ( args ):
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("""audio""" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["""prediction"""] = prediction["""text"""]
        batch["""target"""] = normalize_text(batch["""sentence"""] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args) | 341 |
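# Example invocation (illustrative; the model and dataset identifiers are
# assumptions, not part of the original script):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en \
#       --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs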
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer ( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        return cls(**config )
    def get_config( self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 0 |
'''simple docstring'''
def __a(arr ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k , arr ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(__a(arr))
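# Worked trace of Heap's algorithm above for arr = [1, 2, 3] (hand-computed;
# illustrative, not part of the original file): the 3! = 6 permutations are
# emitted in the order
#   (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)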
| 158 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __A ( unittest.TestCase ):
def lowercase__ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for a, b in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertAlmostEqual(_lowerCamelCase , _lowerCamelCase , delta=_lowerCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Dict = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Dict = None
ops.enable_eager_execution_internal()
lowerCAmelCase : Tuple = tf.config.list_physical_devices('CPU' )
if len(_lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCAmelCase : str = tf.config.list_logical_devices(device_type='CPU' )
lowerCAmelCase : Union[str, Any] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCAmelCase : Any = GradientAccumulator()
lowerCAmelCase : List[Any] = tf.Variable([4.0, 3.0] )
lowerCAmelCase : Optional[Any] = create_optimizer(5E-5 , 10 , 5 )
lowerCAmelCase : Tuple = tf.Variable([0.0, 0.0] , trainable=_lowerCamelCase )
def accumulate_on_replica(UpperCAmelCase_ : Optional[Any] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] ):
with strategy.scope():
lowerCAmelCase : Union[str, Any] = strategy.experimental_local_results(_lowerCamelCase )
local_variables[0].assign(_lowerCamelCase )
local_variables[1].assign(_lowerCamelCase )
strategy.run(_lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowerCamelCase )
def _check_local_values(UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Dict = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowerCamelCase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _lowerCamelCase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
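# Compact sketch of the accumulate-then-apply pattern the tests above exercise
# (`model`, `optimizer`, and `grads_per_micro_batch` are assumptions):
#
#   accumulator = GradientAccumulator()
#   for micro_batch_grads in grads_per_micro_batch:
#       accumulator(micro_batch_grads)          # sums gradients, bumps .step
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()                         # zero out for the next cycle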
| 138 |
'''simple docstring'''
class Things :
    """simple docstring"""
    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu ( name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy ( item , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy ( ) -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
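# Worked sketch (illustrative values, not from the original file): with
# values [10, 9, 8], weights [5, 3, 4] and max_cost 7, sorting by
# value/weight density (2.0, 3.0, 2.0) makes greedy() take "b" (cost 3),
# skip "a" (3 + 5 > 7), then take "c" (3 + 4 == 7), for a total value of 17.0.
#
#   foods = build_menu(["a", "b", "c"], [10, 9, 8], [5, 3, 4])
#   taken, total_value = greedy(foods, 7, Things.value_weight)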
| 344 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase = logging.get_logger(__name__)
class A_ ( __A ):
'''simple docstring'''
_UpperCamelCase : Any = """vision-encoder-decoder"""
_UpperCamelCase : int = True
def __init__( self , **snake_case ):
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuraton of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowercase = kwargs.pop('encoder' )
lowercase = encoder_config.pop('model_type' )
lowercase = kwargs.pop('decoder' )
lowercase = decoder_config.pop('model_type' )
lowercase = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
lowercase = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
lowercase = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case , **snake_case ):
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
lowercase = True
lowercase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.encoder.to_dict()
lowercase = self.decoder.to_dict()
lowercase = self.__class__.model_type
return output
class A_ ( __A ):
'''simple docstring'''
_UpperCamelCase : List[str] = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class A_ ( __A ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OrderedDict()
lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowercase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ):
import torch
lowercase = OrderedDict()
lowercase = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
lowercase = dummy_input["""input_ids"""].shape
lowercase = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase = dummy_input.pop('input_ids' )
lowercase = dummy_input.pop('attention_mask' )
lowercase = torch.zeros(_lowerCamelCase )
return common_inputs
class A_ ( __A ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = "default" ):
lowercase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
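# Usage sketch for the composed config above (assuming the upstream
# `transformers` names VisionEncoderDecoderConfig / from_encoder_decoder_configs;
# the checkpoint ids are illustrative):
#
#   from transformers import AutoConfig, VisionEncoderDecoderConfig
#   encoder_cfg = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#   decoder_cfg = AutoConfig.from_pretrained("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)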
| 195 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency ( inductance: float , capacitance: float ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
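# Worked example for the formula above (illustrative component values):
# L = 10 mH and C = 100 uF give f = 1 / (2*pi*sqrt(1e-2 * 1e-4))
#                                 = 1 / (2*pi*1e-3) ≈ 159.155 Hz.
#
#   resonant_frequency(inductance=0.01, capacitance=0.0001)
#   # -> ('Resonant frequency', 159.15494309189535)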
| 344 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCAmelCase = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__UpperCAmelCase = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
__UpperCAmelCase = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case_ ( self ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install \"sacrebleu>=1.4.12\"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Sequence(datasets.Value('string', id='sequence' ), id='references' ),
} ), codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'], reference_urls=[
'https://github.com/m-popovic/chrF',
], )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = CHRF.CHAR_ORDER, SCREAMING_SNAKE_CASE_ = CHRF.WORD_ORDER, SCREAMING_SNAKE_CASE_ = CHRF.BETA, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, ) -> Tuple:
UpperCamelCase : int = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
UpperCamelCase : Optional[Any] = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
UpperCamelCase : List[Any] = CHRF(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
UpperCamelCase : int = sb_chrf.corpus_score(_lowerCamelCase, _lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 119 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 344 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( __A):
__lowerCAmelCase : str = (DDPMParallelScheduler,)
def a_ ( self : Dict , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowerCamelCase )
return config
def a_ ( self : Dict ):
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def a_ ( self : Dict ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def a_ ( self : Any ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def a_ ( self : List[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def a_ ( self : List[str] ):
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=_lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Any = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5
def a_ ( self : Any ):
"""simple docstring"""
A_ : Tuple = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Union[str, Any] = scheduler_class(**_lowerCamelCase )
A_ : Optional[int] = len(_lowerCamelCase )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = self.dummy_sample_deter + 0.1
A_ : Optional[Any] = self.dummy_sample_deter - 0.1
A_ : List[Any] = samplea.shape[0]
A_ : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ : Optional[Any] = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
A_ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ : str = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : int = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**_lowerCamelCase )
A_ : Union[str, Any] = len(_lowerCamelCase )
A_ : Dict = self.dummy_model()
A_ : Any = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Union[str, Any] = pred_prev_sample
A_ : int = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def a_ ( self : Any ):
"""simple docstring"""
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
A_ : int = scheduler_class(**_lowerCamelCase )
A_ : Dict = len(_lowerCamelCase )
A_ : Union[str, Any] = self.dummy_model()
A_ : List[str] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
A_ : str = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[str] = pred_prev_sample
A_ : str = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
A_ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
A_ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
A_ : int = -1
else:
A_ : Dict = timesteps[i + 1]
A_ : Union[str, Any] = scheduler.previous_timestep(_lowerCamelCase )
A_ : str = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
A_ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : str = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = [1_00, 87, 50, 1, 0]
A_ : List[str] = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
| 167 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
A_ : str = jax.tree_util.tree_map(
lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
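# Usage sketch (a sketch, not part of the original file; upstream this pair is
# typically exposed as load_flax_checkpoint_in_pytorch_model /
# load_flax_weights_in_pytorch_model, and `pt_model` is any instantiated
# PyTorch model of the matching architecture):
#
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")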
| 344 | 0 |