| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 54.1k chars) | int64 (0 to 699) | string (111 to 35.6k chars) | int64 (0 to 699) | int64 (0 or 1) |
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of any k consecutive elements of the array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Start with the sum of the first window, then slide it one step at a time.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
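# Illustrative sanity check (not part of the original file): the window sum is
# updated in O(1) per step instead of re-summing k elements, so the scan is O(n).
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24  # best window is [3, 1, 0, 20]
assert max_sum_in_array([-1, -2, -3], 2) == -3  # all-negative input still works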
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # keep TensorFlow quiet during the imports below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return x if y == 0 else greatest_common_divisor(lowerCAmelCase__ , x % y )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (x * y) // greatest_common_divisor(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ = 20 ):
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(lowerCAmelCase__ , lowerCAmelCase__ )
return g
if __name__ == "__main__":
print(F"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
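# Illustrative usage sketch (assumes a released transformers install, not this
# relative-import module): attribute_map lets generic names such as hidden_size
# resolve to the GPT-2 style fields declared above.
# from transformers import GPTBigCodeConfig
# config = GPTBigCodeConfig(n_embd=512, n_layer=6)
# config.hidden_size        # -> 512, aliased to n_embd through attribute_map
# config.num_hidden_layers  # -> 6, aliased to n_layer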
"""simple docstring"""
def snake_case_ ( A_ : list, A_ : list, A_ : int, A_ : int, A_ : int ):
'''simple docstring'''
if index == number_of_items:
return 0
_lowerCamelCase : int = 0
_lowerCamelCase : str = 0
_lowerCamelCase : Dict = knapsack(A_, A_, A_, A_, index + 1 )
if weights[index] <= max_weight:
_lowerCamelCase : Tuple = values[index] + knapsack(
A_, A_, A_, max_weight - weights[index], index + 1 )
return max(A_, A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
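# Illustrative usage (argument order follows the assumed signature above):
demo_weights = [1, 3, 4, 5]
demo_values = [1, 4, 5, 7]
assert knapsack(demo_weights, demo_values, len(demo_weights), 7, 0) == 9  # items of weight 3 and 4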
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Shift every channel value by `level` (a simple linear brightness change)."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
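# Illustrative check (not part of the original file): the mapping
# c -> 128 + level + (c - 128) is a uniform shift of every channel by `level`.
demo_img = Image.new("L", (2, 2), color=100)
assert change_brightness(demo_img, 50).getpixel((0, 0)) == 150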
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
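# Illustrative sketch (not part of the original script): the parser factory can
# also attach to a parent CLI via subparsers, which is how an `env` subcommand
# would be registered:
# root_parser = argparse.ArgumentParser("accelerate-like CLI")
# env_command_parser(subparsers=root_parser.add_subparsers())
# demo_args = root_parser.parse_args(["env"])
# demo_args.func(demo_args)  # set_defaults(func=env_command) dispatches here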
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search, narrowing each row's bound with the previous row's answer."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives with a flat scan of every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Scan each row, stopping at the first negative since everything after it is negative too."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
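# Illustrative check (not part of the original file): all three counters agree on
# the classic sorted-matrix example; the binary-search version reuses each row's
# answer to shrink the next row's search window, so each row costs O(log cols).
small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(small) == 8
assert count_negatives_brute_force(small) == 8
assert count_negatives_brute_force_with_break(small) == 8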
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative, padding and special ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend the bos token to each sequence."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
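# Illustrative usage with a hypothetical toy vocab (not part of the original
# module): WordpieceTokenizer.tokenize is greedy longest-match-first -- it takes
# the longest prefix present in the vocab, then restarts right after it.
toy_vocab = {"un": 0, "affable": 1, "aff": 2, "able": 3, "<unk>": 4}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
assert wp.tokenize("unaffable") == ["un", "affable"]
assert wp.tokenize("xyz") == ["<unk>", "<unk>", "<unk>"]  # unmatched chars fall back one by one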
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
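# Illustrative sketch (not part of the original script): how the "init" pattern
# and template rewrite a version line.
demo_pattern, demo_template = REPLACE_PATTERNS["init"]
demo_src = '__version__ = "4.28.0.dev0"\n'
demo_out = demo_pattern.sub(demo_template.replace("VERSION", "4.28.0"), demo_src)
assert demo_out.startswith('__version__ = "4.28.0"')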
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
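# Illustrative usage sketch (hypothetical files, not part of the original module):
# token type ids mark [CLS] + segment A + [SEP] with 0s and segment B + [SEP] with 1s.
# tok = SqueezeBertTokenizerFast(tokenizer_file="tokenizer.json")
# tok.create_token_type_ids_from_sequences([5, 6], [7, 8])
# -> [0, 0, 0, 0, 1, 1, 1]   # [CLS] 5 6 [SEP] | 7 8 [SEP]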
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            length = min(end - start, max_word_len)
            for i in range(length, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
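# Illustrative check (not part of the original script): add_sub_symbol rewrites
# BERT sub-tokens that sit inside an LTP-segmented Chinese word with a "##"
# prefix, so downstream whole-word masking can mask the full word at once.
demo_tokens = ["[CLS]", "身", "高", "[SEP]"]
assert add_sub_symbol(demo_tokens, {"身高"}) == ["[CLS]", "身", "##高", "[SEP]"]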
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
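# Illustrative usage sketch (hypothetical Spark session, not part of the original
# module); Dataset.from_spark in the datasets library wraps this reader:
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
# ds = SparkDatasetReader(df, streaming=False).read()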
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
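# Example invocation (hypothetical; torch.load above expects a locally downloaded copy
# of the release checkpoint, and the dump folder path is a placeholder):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large --model_name dpt-large --show_prediction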
| 50 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE : Optional[Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _lowerCamelCase( unittest.TestCase ):
    def setUp( self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(lowerCamelCase := git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'), )
    def tearDown( self):
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency( self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=1_19)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers( self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent( self):
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''', F'''{long_class_name}LMPredictionHead''', re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE), )
    def test_convert_to_localized_md( self):
"""simple docstring"""
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list_sample, localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
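# Minimal sketch (a simplification for illustration; the real logic lives in
# utils/check_copies.py): a "# Copied from" marker names a reference object whose
# source must stay identical to the code that follows the marker.
def find_copied_from_markers(source):
    # returns (line_number, target) pairs for every "# Copied from" comment
    matches = re.finditer(r"^#\s*Copied from\s+(\S+)", source, flags=re.MULTILINE)
    return [(source[: m.start()].count("\n") + 1, m.group(1)) for m in matches]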
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
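    # Note (illustrative): with _LazyModule, `from transformers.models.mvp import MvpModel`
    # resolves lazily; the heavy modeling_mvp module is only imported on first attribute access.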
| 50 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
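        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226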
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_interpolate_pos_encoding( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 36_01, 3_84) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.float16 , device_map='''auto''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 90 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig ):
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self ,vocab_size=5_04_00 ,n_positions=20_48 ,n_ctx=20_48 ,n_embd=40_96 ,n_layer=28 ,n_head=16 ,rotary_dim=64 ,n_inner=None ,activation_function="gelu_new" ,resid_pdrop=0.0 ,embd_pdrop=0.0 ,attn_pdrop=0.0 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=5_02_56 ,eos_token_id=5_02_56 ,tie_word_embeddings=False ,**kwargs ,):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,tie_word_embeddings=tie_word_embeddings ,**kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast ):
    def __init__( self ,config: PretrainedConfig ,task: str = "default" ,patching_specs: List[PatchingSpec] = None ,use_past: bool = False ,):
        super().__init__(config ,task=task ,patching_specs=patching_specs ,use_past=use_past )
        if not getattr(self._config ,"""pad_token_id""" ,None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast ,self ).generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                # each layer caches a (key, value) pair of shape
                # (batch, n_head, past_sequence_length, n_embd // n_head)
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
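# Hypothetical usage sketch (assumes the ONNX config class above; the tokenizer and
# config objects are illustrative placeholders):
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   # dummy["past_key_values"] then holds num_layers (key, value) pairs of zero tensors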
| 50 | 0 |
"""simple docstring"""
def solution(n: int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
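# Note: three-argument pow(base, exp, mod) performs modular exponentiation in
# O(log exp) multiplications, so the full 2**7830457 is never materialised; only
# its residue modulo 10**n is kept (Project Euler problem 97).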
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 91 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
'''simple docstring'''
def solution(power: int = 1000 ) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
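# Example: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.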
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 92 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
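    # Example invocation via python-fire (script name and paths are placeholders):
    #   python fp16_convert.py pytorch_model.bin --map_location cpu --save_path pytorch_model.fp16.bin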
| 50 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    model_type = """wavlm"""
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , num_buckets=3_2_0 , max_bucket_distance=8_0_0 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_1_2 , num_ctc_classes=8_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
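        # e.g. with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the ratio is
        # 5 * 2**6 = 320, i.e. one feature frame per 320 input waveform samples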
| 93 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
        """-DCUDA_HAS_FP16=1""",
        """-D__CUDA_NO_HALF_OPERATORS__""",
        """-D__CUDA_NO_HALF_CONVERSIONS__""",
        """-D__CUDA_NO_HALF2_OPERATORS__""",
    ] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
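# Note (illustrative): torch.utils.cpp_extension.load JIT-compiles the listed sources on
# first use and caches the built extension (by default under ~/.cache/torch_extensions),
# so later calls simply reload the cached shared library.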
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board: list[list[int]] ) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    """Helper function to solve the knight tour problem."""
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int ) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
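# Example (illustrative): open tours exist on a 5x5 board, so open_knight_tour(5)
# returns a board whose entries 1..25 trace one valid sequence of knight moves.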
| 94 |
'''simple docstring'''
def print_max_activities(start: list[int] , finish: list[int] ) -> None:
    n = len(finish )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
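    # With the sample data above this prints: 0,1,3,4,
    # (an activity is selected whenever it starts no earlier than the finish
    # time of the previously selected activity)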
| 50 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase_ (unittest.TestCase ):
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Tuple = Path(lowerCAmelCase_ ) / "preprocessor_config.json"
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCAmelCase_ , "w" ) )
UpperCAmelCase_ : Optional[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[Any] = Path(lowerCAmelCase_ ) / "preprocessor_config.json"
UpperCAmelCase_ : Dict = Path(lowerCAmelCase_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCAmelCase_ , "w" ) )
UpperCAmelCase_ : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : str = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCAmelCase_ : Union[str, Any] = Path(lowerCAmelCase_ ) / "preprocessor_config.json"
UpperCAmelCase_ : List[Any] = Path(lowerCAmelCase_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCAmelCase_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ : str = AutoImageProcessor.from_pretrained(lowerCAmelCase_ ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ : Dict = CLIPImageProcessor(**lowerCAmelCase_ )
# save in new folder
model_config.save_pretrained(lowerCAmelCase_ )
config.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ : List[Any] = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase_ , "w" ) , )
UpperCAmelCase_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
with self.assertRaisesRegex(
lowerCAmelCase_ , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ : Any = AutoImageProcessor.from_pretrained("clip-base" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowerCAmelCase_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ : Optional[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ , revision="aaaaaa" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ : List[Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase_ : List[str] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase_ )
UpperCAmelCase_ : str = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
try:
AutoConfig.register("custom" , lowerCAmelCase_ )
AutoImageProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoImageProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[str] = Path(lowerCAmelCase_ ) / "preprocessor_config.json"
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCAmelCase_ , "w" ) )
UpperCAmelCase_ : Tuple = CustomImageProcessor.from_pretrained(lowerCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Any = AutoImageProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
        class NewImageProcessor(CustomImageProcessor ):
            is_local = True
try:
AutoConfig.register("custom" , lowerCAmelCase_ )
AutoImageProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Optional[int] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : str = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(lowerCAmelCase_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
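# Minimal registration sketch (assumes the transformers auto classes exercised above;
# the folder path is a placeholder):
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained(folder_containing_a_custom_config)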
| 95 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
    def __init__( self ,args=None ,**kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,FutureWarning ,)
        super().__init__(args=args ,**kwargs )
| 50 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum ):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file( data_dir , mode: Union[Split, str] ) -> List[InputExample]:
        raise NotImplementedError
    @staticmethod
    def get_labels( path: str ) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_seq_length: int , tokenizer: PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-1_0_0 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info("""Writing example %d of %d""" , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info("""*** Example ***""" )
                logger.info("""guid: %s""" , example.guid )
                logger.info("""tokens: %s""" , """ """.join([str(x ) for x in tokens] ) )
                logger.info("""input_ids: %s""" , """ """.join([str(x ) for x in input_ids] ) )
                logger.info("""input_mask: %s""" , """ """.join([str(x ) for x in input_mask] ) )
                logger.info("""segment_ids: %s""" , """ """.join([str(x ) for x in segment_ids] ) )
                logger.info("""label_ids: %s""" , """ """.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
return features
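        # Layout sketch (illustrative): for max_seq_length = 8 and tokens [CLS] w1 w2 [SEP],
        # right padding yields
        #   input_ids      = [cls, w1, w2, sep, pad, pad, pad, pad]
        #   attention_mask = [1, 1, 1, 1, 0, 0, 0, 0]
        #   label_ids      = [-100, l1, l2, -100, -100, -100, -100, -100]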
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset ):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + """.lock"""
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F'Loading features from cached file {cached_features_file}' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F'Creating features from dataset file at {data_dir}' )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'Saving features into cached file {cached_features_file}' )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
class __A :
UpperCAmelCase__ = 42
UpperCAmelCase__ = -1_0_0
def __init__( self : Tuple , __snake_case : TokenClassificationTask , __snake_case : str , __snake_case : PreTrainedTokenizer , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[int] = None , __snake_case : str=False , __snake_case : Split = Split.train , ) -> List[Any]:
__magic_name__: Optional[int] = token_classification_task.read_examples_from_file(__snake_case , __snake_case )
# TODO clean up all this to leverage built-in features of tokenizers
__magic_name__: Optional[int] = token_classification_task.convert_examples_to_features(
__snake_case , __snake_case , __snake_case , __snake_case , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__: Union[str, Any] = tf.data.Dataset.from_generator(
__snake_case , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__magic_name__: str = tf.data.Dataset.from_generator(
__snake_case , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
__magic_name__: Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Union[str, Any] ) -> int:
return len(self.features )
def __getitem__( self : Dict , __snake_case : int ) -> InputFeatures:
return self.features[i]
| 96 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = []
for line in lines:
lowerCamelCase__ = re.sub(R"""#.*""" , """""" , line ) # remove comments
if line:
filtered_lines.append(line )
lowerCamelCase__ = """\n""".join(__lowerCAmelCase )
# Make a hash from all this code
lowerCamelCase__ = full_str.encode("""utf-8""" )
return shaaaa(__lowerCAmelCase ).hexdigest()
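# Self-contained sketch of the same fingerprinting idea (hashlib.sha256 is the
# standard-library primitive being wrapped above; the helper name here is
# illustrative, not from this file):
import hashlib

def _sketch_hash_lines(lines):
    stripped = [re.sub(R"""#.*""" , """""" , l ) for l in lines]  # drop comments, as above
    payload = "\n".join(l for l in stripped if l).encode("utf-8")
    return hashlib.sha256(payload).hexdigest()

# a comment-only or blank-line edit leaves the cache fingerprint unchanged
assert _sketch_hash_lines(["x = 1", "", "# note"]) == _sketch_hash_lines(["x = 1"])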
# get importable module names and hash for caching
UpperCamelCase : Dict = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCamelCase : str = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
UpperCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
lowercase_ = []
lowercase_ = []
lowercase_ = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
lowercase_ = len(snake_case__ ) if (len(snake_case__ ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(snake_case__ ) , '''Postfix'''.center(snake_case__ ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(snake_case__ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(snake_case__ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(snake_case__ ) == 0:
stack.append(snake_case__ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(snake_case__ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(snake_case__ ) # push x to stack
print(
x.center(8 ) , (''''''.join(snake_case__ )).ljust(snake_case__ ) , (''''''.join(snake_case__ )).ljust(snake_case__ ) , sep=''' | ''' , ) # Output in tabular format
while len(snake_case__ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(snake_case__ )).ljust(snake_case__ ) , (''''''.join(snake_case__ )).ljust(snake_case__ ) , sep=''' | ''' , ) # Output in tabular format
return "".join(snake_case__ ) # return Postfix as str
def a ( snake_case__: List[Any] ):
'''simple docstring'''
lowercase_ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(snake_case__ ) ):
if infix[i] == "(":
lowercase_ = ''')''' # change "(" to ")"
elif infix[i] == ")":
lowercase_ = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(snake_case__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
__a = input('\nEnter an Infix Equation = ') # Input an Infix equation
__a = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
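# Worked example (expected values shown as comments; function names follow the
# call sites above):
# infix_2_postfix("a+b*(c^d-e)") -> "abcd^e-*+"
# infix_2_prefix("a+b*(c^d-e)") -> "+a*b-^cde"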
| 97 |
'''simple docstring'''
import operator
def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(__lowerCAmelCase ):
if _operator(__lowerCAmelCase , sublist[-1] ):
sublist.append(__lowerCAmelCase )
arr.pop(__lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(__lowerCAmelCase )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(__lowerCAmelCase ):
if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
solution.insert(__lowerCAmelCase , __lowerCAmelCase )
break
else:
solution.append(__lowerCAmelCase )
strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
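# Note: the helper consumes ``arr`` in place (items are popped as they move into
# sublists), so callers that need the input afterwards should pass a copy.
# Another illustrative call: strand_sort([10, 5, 30, 40, 2, 4, 9]) -> [2, 4, 5, 9, 10, 30, 40]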
| 50 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase__ : List[str] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : int , lowerCAmelCase__ : Path , lowerCAmelCase__ : Union[str, None] = None , lowerCAmelCase__ : Union[List[str], None] = None , lowerCAmelCase__ : Union[str, List[str], None] = None , lowerCAmelCase__ : bool = True , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [file for file in os.listdir(lowerCAmelCase__ ) if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )]
if identifier is not None:
_UpperCamelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for n_ in n_identifier:
_UpperCamelCase = [file for file in files if n_ not in file]
else:
_UpperCamelCase = [file for file in files if n_identifier not in file]
_UpperCamelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCamelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , lowerCAmelCase__ )
if only_modules:
_UpperCamelCase = file.split('''.''' )[0]
try:
_UpperCamelCase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = doctest.DocTestSuite(lowerCAmelCase__ )
_UpperCamelCase = unittest.TextTestRunner().run(lowerCAmelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
_UpperCamelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''modeling'''
_UpperCamelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ , ignore_files=lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''tokenization'''
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = '''configuration'''
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
_UpperCamelCase = Path('''src/transformers''' )
_UpperCamelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(lowerCAmelCase__ , n_identifier=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase = Path('''docs/source''' )
_UpperCamelCase = ['''favicon.ico''']
self.analyze_directory(lowerCAmelCase__ , ignore_files=lowerCAmelCase__ , only_modules=lowerCAmelCase__ )
| 98 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A__ ( __lowerCAmelCase : dict ):
return (data["data"], data["target"])
def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
# Predict target for test data
lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
return predictions
def A__ ( ):
lowerCamelCase__ = fetch_california_housing()
lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
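# Optional extension (a sketch, not part of the original script): RMSE is often
# easier to read than MSE because it is in the target's own units; recent
# scikit-learn versions expose it via the ``squared`` flag:
# mean_squared_error(y_test, predictions, squared=False)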
| 50 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__A )
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , **__A ):
super().__init__(**__A )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , __A , **__A ):
return super().__call__(__A , **__A )
def snake_case_ ( self , **__A ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def snake_case_ ( self , __A , __A=None , __A="This is a sound of {}." ):
if isinstance(__A , __A ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(__A ).content
else:
with open(__A , """rb""" ) as f:
__a = f.read()
if isinstance(__A , __A ):
__a = ffmpeg_read(__A , self.feature_extractor.sampling_rate )
if not isinstance(__A , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
__a = candidate_labels
__a = [hypothesis_template.format(x ) for x in candidate_labels]
__a = self.tokenizer(__A , return_tensors=self.framework , padding=__A )
__a = [text_inputs]
return inputs
def snake_case_ ( self , __A ):
__a = model_inputs.pop("""candidate_labels""" )
__a = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __A ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__A , **__A )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def snake_case_ ( self , __A ):
__a = model_outputs.pop("""candidate_labels""" )
__a = model_outputs["""logits"""][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
__a = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__A , __A ) , key=lambda x : -x[0] )
]
return result
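# Hedged usage sketch (the checkpoint name is an assumption, not taken from this
# file; any CLAP-style zero-shot audio checkpoint would work the same way):
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# -> [{"score": ..., "label": "Sound of a dog"}, {"score": ..., "label": "Sound of vacuum cleaner"}]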
| 99 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 50 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : Any = logging.get_logger(__name__)
_A : Union[str, Any] = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : str = """focalnet"""
def __init__( self , A_=2_24 , A_=4 , A_=3 , A_=96 , A_=False , A_=[1_92, 3_84, 7_68, 7_68] , A_=[2, 2, 6, 2] , A_=[2, 2, 2, 2] , A_=[3, 3, 3, 3] , A_="gelu" , A_=4.0 , A_=0.0 , A_=0.1 , A_=False , A_=1E-4 , A_=False , A_=False , A_=False , A_=0.02 , A_=1E-5 , A_=32 , A_=None , A_=None , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = use_conv_embed
SCREAMING_SNAKE_CASE__ = hidden_sizes
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = focal_levels
SCREAMING_SNAKE_CASE__ = focal_windows
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = use_layerscale
SCREAMING_SNAKE_CASE__ = layerscale_value
SCREAMING_SNAKE_CASE__ = use_post_layernorm
SCREAMING_SNAKE_CASE__ = use_post_layernorm_in_modulation
SCREAMING_SNAKE_CASE__ = normalize_modulator
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = encoder_stride
SCREAMING_SNAKE_CASE__ = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
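# Minimal instantiation sketch (all arguments fall back to the defaults of
# __init__ above; the attribute values shown are what those defaults produce):
#
# configuration = __snake_case()
# configuration.stage_names -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']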
| 100 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : Any = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
from __future__ import annotations
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : list[list[int]] = []
SCREAMING_SNAKE_CASE_ : list[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : int = sum(A__ )
create_state_space_tree(A__, A__, A__, A__, A__, A__ )
return result
def a__ ( A__, A__, A__, A__, A__, A__, ):
    # prune this branch: the running path already overshoots the target, or even
    # taking every remaining number could not close the gap to max_sum
    if sum(A__ ) > max_sum or (remaining_nums_sum + sum(A__ )) < max_sum:
return
if sum(A__ ) == max_sum:
result.append(A__ )
return
for index in range(A__, len(A__ ) ):
create_state_space_tree(
A__, A__, index + 1, [*path, nums[index]], A__, remaining_nums_sum - nums[index], )
lowerCAmelCase__ : List[str] =[3, 34, 4, 12, 5, 2]
lowerCAmelCase__ : str =9
lowerCAmelCase__ : Optional[int] =generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 101 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int , __lowerCamelCase : int ):
    # the XOR of two two's-complement integers is negative exactly when their sign bits differ
    return __lowerCAmelCase ^ __lowerCamelCase < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
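# Worked truth table for the sign-bit test above (two's-complement integers):
# (-1) ^ 1 == -2 < 0 -> True (opposite signs)
# 3 ^ 7 == 4 >= 0 -> False (same sign)
# (-3) ^ (-7) == 4 >= 0 -> False (same sign)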
| 50 | 0 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : List[Any] = PriorTransformer
__lowerCAmelCase : List[Any] = """hidden_states"""
@property
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = 4
UpperCamelCase : Dict = 8
UpperCamelCase : str = 7
UpperCamelCase : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : Optional[int] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _a ( self , _A=0 ):
'''simple docstring'''
torch.manual_seed(_A )
UpperCamelCase : Optional[int] = 4
UpperCamelCase : str = 8
UpperCamelCase : Dict = 7
UpperCamelCase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : Tuple = torch.randn((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _a ( self ):
'''simple docstring'''
return (4, 8)
@property
def _a ( self ):
'''simple docstring'''
return (4, 8)
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
UpperCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : int = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_A )
UpperCamelCase : str = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase : Union[str, Any] = self.model_class(**_A )
UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Any = [*signature.parameters.keys()]
UpperCamelCase : List[str] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
UpperCamelCase : Optional[Any] = model.to(_A )
if hasattr(_A , """set_default_attn_processor""" ):
model.set_default_attn_processor()
UpperCamelCase : List[Any] = self.get_dummy_seed_input()
with torch.no_grad():
UpperCamelCase : Dict = model(**_A )[0]
UpperCamelCase : int = output[0, :5].flatten().cpu()
print(_A )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCamelCase : List[Any] = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
@slow
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self , _A=1 , _A=7_6_8 , _A=7_7 , _A=0 ):
'''simple docstring'''
torch.manual_seed(_A )
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : List[str] = embedding_dim
UpperCamelCase : Optional[Any] = num_embeddings
UpperCamelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_A )
UpperCamelCase : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def _a ( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : str = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(_A )
UpperCamelCase : List[str] = self.get_dummy_seed_input(seed=_A )
with torch.no_grad():
UpperCamelCase : str = model(**_A )[0]
assert list(sample.shape ) == [1, 7_6_8]
UpperCamelCase : List[Any] = sample[0, :8].flatten().cpu()
print(_A )
UpperCamelCase : Dict = torch.tensor(_A )
assert torch_all_close(_A , _A , atol=1e-3 )
| 102 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_snake_case = [[1, 2, 4], [1, 2, 3, 4]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCamelCase ) )
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_snake_case = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint(__lowerCamelCase ) # fails here
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = [[1, 2, 3], [1, 2, 4]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
_snake_case , _snake_case , _snake_case = dc.update(1 )
_snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
_snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(3 )
_snake_case = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
_snake_case , _snake_case , _snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_snake_case , _snake_case , _snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_snake_case , _snake_case , _snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 103 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase : int = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : int, UpperCAmelCase_ : int, UpperCAmelCase_ : int, UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
A__ = ksize + 1
A__ = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(UpperCAmelCase_ ):
for x in range(UpperCAmelCase_ ):
# distance from center
A__ = x - ksize // 2
A__ = y - ksize // 2
# degree to radiant
A__ = theta / 180 * np.pi
A__ = np.cos(_theta )
A__ = np.sin(_theta )
# get kernel x
A__ = cos_theta * px + sin_theta * py
# get kernel y
A__ = -sin_theta * px + cos_theta * py
# fill kernel
A__ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
UpperCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
UpperCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
UpperCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
UpperCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
UpperCamelCase = out / out.max() * 255
UpperCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
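# Quick sanity-check sketch (expected values shown as comments; the function
# name follows the call sites above, and the even ksize of 10 is bumped to 11
# inside the helper):
# kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
# kernel.shape -> (11, 11)
# kernel[5, 5] -> 1.0, the unattenuated centre: exp(0) * cos(0) at px = py = 0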
| 104 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'gpt_bigcode'
_UpperCamelCase = ['past_key_values']
_UpperCamelCase = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scale_attn_weights
lowerCamelCase__ = use_cache
lowerCamelCase__ = attention_softmax_in_fpaa
lowerCamelCase__ = scale_attention_softmax_in_fpaa
lowerCamelCase__ = multi_query
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
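# Minimal instantiation sketch (defaults from the signature above; the
# attribute_map lets the generic names resolve to the model-specific ones):
#
# configuration = UpperCamelCase__()
# configuration.hidden_size -> 768 (served by 'n_embd' via the attribute map)
# configuration.multi_query -> True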
| 50 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [False] * len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = [-1] * len(lowerCamelCase_ )
def dfs(lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCamelCase_ , 1 - c )
for i in range(len(lowerCamelCase_ ) ):
if not visited[i]:
dfs(lowerCamelCase_ , 0 )
for i in range(len(lowerCamelCase_ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCamelCase__ : Any = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
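# A counterexample sketch (hypothetical graph, not from the file): an odd cycle
# is not 2-colorable, so the same check returns False for a triangle.
# check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) -> False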
| 105 |
'''simple docstring'''
from PIL import Image
def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ):
def brightness(c : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 50 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
A = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' )
return image
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
A = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Dict:
'''simple docstring'''
A = dct.pop(lowerCAmelCase__ )
A = val
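# e.g. (illustrative key pair taken from the table built above):
# rename_key(sd, "ln_vision.weight", "vision_model.post_layernorm.weight")
# moves the tensor to the new key in place, without copying the underlying data.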
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ) -> int:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
A = torch.cat((q_bias, torch.zeros_like(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ ), v_bias) )
A = qkv_bias
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = 364 if 'coco' in model_name else 224
A = BlipaVisionConfig(image_size=lowerCAmelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
A = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCAmelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
A = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCAmelCase__ ).to_dict()
elif "t5-xl" in model_name:
A = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
A = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
A = BlipaConfig(vision_config=lowerCAmelCase__ , text_config=lowerCAmelCase__ )
return config, image_size
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
A = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
A = tokenizer('\n' , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
A , A = get_blipa_config(lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
A = BlipaForConditionalGeneration(lowerCAmelCase__ ).eval()
A = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
A , A = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
A = 'cuda' if torch.cuda.is_available() else 'cpu'
A , A , A = load_model_and_preprocess(
name=lowerCAmelCase__ , model_type=lowerCAmelCase__ , is_eval=lowerCAmelCase__ , device=lowerCAmelCase__ )
original_model.eval()
print('Done!' )
# update state dict keys
A = original_model.state_dict()
A = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
A = state_dict.pop(lowerCAmelCase__ )
if key.startswith('Qformer.bert' ):
A = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
A = key.replace('self' , 'attention' )
if "opt_proj" in key:
A = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
A = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
A = key.replace('opt' , 'language' )
if key.startswith('t5' ):
A = key.replace('t5' , 'language' )
A = val
# read in qv biases
read_in_q_v_bias(lowerCAmelCase__ , lowerCAmelCase__ )
A , A = hf_model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
A = load_demo_image()
A = vis_processors['eval'](lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
A = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ )
# create processor
A = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ )
A = BlipaProcessor(image_processor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
A = processor(images=lowerCAmelCase__ , return_tensors='pt' ).pixel_values.to(lowerCAmelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
hf_model.to(lowerCAmelCase__ )
with torch.no_grad():
if "opt" in model_name:
A = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
A = hf_model(lowerCAmelCase__ , lowerCAmelCase__ ).logits
else:
A = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
A = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
A = hf_model(lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
A = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowerCAmelCase__ )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
A = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowerCAmelCase__ )
else:
# cast to same type
A = logits.dtype
assert torch.allclose(original_logits.to(lowerCAmelCase__ ) , lowerCAmelCase__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
A = ''
A = tokenizer(lowerCAmelCase__ , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ )
A = original_model.generate({'image': original_pixel_values} )
A = hf_model.generate(
lowerCAmelCase__ , lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCAmelCase__ )
A = input_ids.shape[1]
A = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase__ )
A = [text.strip() for text in output_text]
print('HF generation:' , lowerCAmelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCAmelCase__ )
hf_model.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case :str =argparse.ArgumentParser()
__snake_case :Optional[Any] =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Name of the model checkpoint to convert (must be one of the listed choices)',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__snake_case :List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 106 |
'''simple docstring'''
def A__ ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCamelCase : Dict = generate_large_matrix()
UpperCamelCase : Any = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__ ( __lowerCAmelCase : list[list[int]] ):
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCamelCase__ = (left + right) // 2
lowerCamelCase__ = array[mid]
# mid is the first negative index when its value is negative and the previous value is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCamelCase__ = mid + 1
else:
lowerCamelCase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def A__ ( __lowerCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
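# A minimal self-contained sketch of the binary-search strategy benchmarked
# below; the helper name and test grid are illustrative assumptions, not part
# of the timed functions. Each row is sorted in decreasing order, so locating
# the first negative entry per row gives the count of negatives in its suffix.
def _count_negatives_sketch(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        lo, hi = 0, len(row)
        while lo < hi:
            mid = (lo + hi) // 2
            if row[mid] < 0:
                hi = mid  # first negative is at mid or earlier
            else:
                lo = mid + 1  # first negative is strictly after mid
        total += len(row) - lo
    return total
assert _count_negatives_sketch([[4, 3, -1], [3, 2, -2], [-1, -1, -2]]) == 5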
def A__ ( ):
from timeit import timeit
print("""Running benchmarks""" )
lowerCamelCase__ = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 50 | 0 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] ):
_A = min(__snake_case ) # min() finds the minimum value
_A = max(__snake_case ) # max() finds the maximum value
_A = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_A = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__snake_case , __snake_case ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
_A = 0
for count in range(__snake_case ):
while holes[count] > 0:
holes[count] -= 1
_A = count + min_val
i += 1
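# Worked trace (values from the demo below): for a = [8, 3, 2, 7, 4, 6, 8],
# min_val = 2 and max_val = 8, so size = 7 and the hole counts for the values
# 2..8 are holes = [1, 1, 1, 0, 1, 1, 2]; unpacking the holes in order
# rewrites a as [2, 3, 4, 6, 7, 8, 8]. Time and extra space are O(n + size),
# so the sort only pays off when the value range is close to n.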
def _SCREAMING_SNAKE_CASE ( ):
_A = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__snake_case )
print('Sorted order is:' , ' '.join(__snake_case ) )
if __name__ == "__main__":
main()
| 107 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase : List[Any] = 'examples/'
UpperCamelCase : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase : Any = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase : Any = 'README.md'
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase )
lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : str ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ = """1. Want to contribute a new model?"""
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Find the start of the list.
lowerCamelCase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCAmelCase )
def A__ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Union[str, Any]=False ):
lowerCamelCase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ = default_version.base_version
elif patch:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
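# Worked example of the bump logic above (hypothetical version strings):
# starting from the dev version "4.21.0.dev0", a normal release proposes
# "4.21.0"; starting from a released "4.21.0", a patch proposes "4.21.1"
# and a normal release proposes "4.22.0".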
def A__ ( ):
lowerCamelCase__ = get_version()
lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 50 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
_UpperCAmelCase = np.array(__snake_case ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__snake_case )
return 2.0 * image - 1.0
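# Shape/range sketch (hypothetical input): a 500x333 RGB PIL image is resized
# to 480x320 (the nearest lower multiples of 32), converted to a float tensor
# of shape (1, 3, 320, 480), and rescaled so that pixel value 0 maps to -1.0
# and 255 maps to 1.0.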
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : Tuple , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(lowerCamelCase , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" )
if isinstance(lowerCamelCase , PIL.Image.Image ):
_UpperCAmelCase = preprocess(lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
_UpperCAmelCase = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(lowerCamelCase ).sample
_UpperCAmelCase = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
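# Call sketch (variable names are hypothetical; the pipeline instance is
# assumed to be loaded elsewhere): given a PIL image or a (B, 3, H, W) tensor,
#   out = pipeline(image=low_res_image, num_inference_steps=100, eta=0.0)
#   upscaled = out.images[0]
# runs the diffusion loop above and decodes the result through the VQ-VAE.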
| 108 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
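# Illustrative output (hypothetical lengths): for a single sequence A the mask
# is all zeros over [CLS] A [SEP]; for a pair (A, B) it is zeros over
# [CLS] A [SEP] followed by ones over B [SEP], e.g. with len(A) == 2 and
# len(B) == 3 the result is [0, 0, 0, 0, 1, 1, 1, 1].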
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 50 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __a ( _snake_case ):
def __init__( self : Optional[int] ,lowerCamelCase : Optional[NestedDataStructureLike[PathLike]] = None ,lowerCamelCase : Optional[NamedSplit] = None ,lowerCamelCase : Optional[Features] = None ,lowerCamelCase : str = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = path_or_paths
__SCREAMING_SNAKE_CASE = split if split or isinstance(lowerCamelCase ,lowerCamelCase ) else """train"""
__SCREAMING_SNAKE_CASE = features
__SCREAMING_SNAKE_CASE = cache_dir
__SCREAMING_SNAKE_CASE = keep_in_memory
__SCREAMING_SNAKE_CASE = streaming
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
class __a ( _snake_case ):
def __init__( self : Tuple ,lowerCamelCase : Optional[Features] = None ,lowerCamelCase : str = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = features
__SCREAMING_SNAKE_CASE = cache_dir
__SCREAMING_SNAKE_CASE = keep_in_memory
__SCREAMING_SNAKE_CASE = streaming
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
pass
| 109 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __lowerCAmelCase : Any ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
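# Quick sanity checks (assumed behavior): ord('中') == 0x4E2D lies in the first
# CJK block above, so it counts as a Chinese character, while 'a', Hangul
# syllables and Japanese kana fall outside every listed range and return False.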
def A__ ( __lowerCAmelCase : str ):
# Return 1 only if every character in the word is a CJK character (e.g. '身高' or '神'); words like '180' return 0.
for char in word:
lowerCamelCase__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = set()
for token in tokens:
lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowerCamelCase__ = list(__lowerCAmelCase )
return word_list
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowerCamelCase__ = bert_tokens
lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
while start < end:
lowerCamelCase__ = True
if is_chinese(bert_word[start] ):
lowerCamelCase__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowerCamelCase__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase__ = """##""" + bert_word[j]
lowerCamelCase__ = start + i
lowerCamelCase__ = False
break
if single_word:
start += 1
return bert_word
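# Worked example (hypothetical inputs): with chinese_word_set = {'身高'} and
# bert_tokens = ['我', '身', '高', '180'], the longest match starting at index 1
# is '身高', so the output is ['我', '身', '##高', '180']; only the non-leading
# pieces of a matched whole word receive the '##' prefix.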
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Optional[int] ):
# For Chinese (Ro)BERT models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
| 50 | 0 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase__ = logging.getLogger(__name__)
UpperCamelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase : bool = field(
default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __snake_case ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase : Optional[str] = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase : bool = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase : Optional[int] = field(
default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase : bool = field(
default=lowercase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def __snake_case ( self ):
if self.train_file is not None:
UpperCAmelCase__ : str = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCAmelCase__ : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase ( _snake_case ,_snake_case ):
with open(_snake_case ,'r' ,encoding='utf-8' ) as f:
UpperCAmelCase__ : Optional[Any] = [json.loads(_snake_case ) for line in f.read().splitlines() if (len(_snake_case ) > 0 and not line.isspace())]
assert len(_snake_case ) == len(_snake_case )
UpperCAmelCase__ : List[str] = {c: dataset[c] for c in dataset.column_names}
UpperCAmelCase__ : List[str] = refs
return Dataset.from_dict(_snake_case )
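# The ref file is expected to hold one JSON list per line, aligned 1:1 with the
# dataset rows; each list marks the token positions that belong to a multi-char
# Chinese word so the whole-word-mask collator can mask those pieces together.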
def lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase__ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' ,_snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase__ : str = load_dataset(data_args.dataset_name ,data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCAmelCase__ : Tuple = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=F'''train[:{data_args.validation_split_percentage}%]''' ,)
UpperCAmelCase__ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=F'''train[{data_args.validation_split_percentage}%:]''' ,)
else:
UpperCAmelCase__ : List[Any] = {}
if data_args.train_file is not None:
UpperCAmelCase__ : Tuple = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase__ : List[str] = data_args.validation_file
UpperCAmelCase__ : List[str] = data_args.train_file.split('.' )[-1]
if extension == "txt":
UpperCAmelCase__ : Union[str, Any] = 'text'
UpperCAmelCase__ : List[Any] = load_dataset(_snake_case ,data_files=_snake_case )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__ : List[str] = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(model_args.config_name ,**_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path ,**_snake_case )
else:
UpperCAmelCase__ : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
UpperCAmelCase__ : int = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,**_snake_case )
elif model_args.model_name_or_path:
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,**_snake_case )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
UpperCAmelCase__ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=_snake_case ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info('Training new model from scratch' )
UpperCAmelCase__ : Optional[Any] = AutoModelForMaskedLM.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCAmelCase__ : List[str] = datasets['train'].column_names
else:
UpperCAmelCase__ : Optional[Any] = datasets['validation'].column_names
UpperCAmelCase__ : Dict = 'text' if 'text' in column_names else column_names[0]
UpperCAmelCase__ : List[Any] = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_snake_case ):
# Remove empty lines
examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples['text'] ,padding=_snake_case ,truncation=_snake_case ,max_length=data_args.max_seq_length )
UpperCAmelCase__ : List[str] = datasets.map(
_snake_case ,batched=_snake_case ,num_proc=data_args.preprocessing_num_workers ,remove_columns=[text_column_name] ,load_from_cache_file=not data_args.overwrite_cache ,)
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCAmelCase__ : Any = add_chinese_references(tokenized_datasets['train'] ,data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCAmelCase__ : Optional[Any] = add_chinese_references(
tokenized_datasets['validation'] ,data_args.validation_ref_file )
# If we have ref files, we need to keep the trainer from removing those columns
UpperCAmelCase__ : int = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCAmelCase__ : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCAmelCase__ : Dict = DataCollatorForWholeWordMask(tokenizer=_snake_case ,mlm_probability=data_args.mlm_probability )
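# This collator masks whole words rather than isolated subword pieces: tokens
# starting with '##' (or the positions listed in the Chinese ref column, when
# present) are grouped with their leading token, and the masking decision is
# made once per group at the given mlm_probability.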
# Initialize our Trainer
UpperCAmelCase__ : int = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=tokenized_datasets['train'] if training_args.do_train else None ,eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None ,tokenizer=_snake_case ,data_collator=_snake_case ,)
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase__ : Tuple = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase__ : str = model_args.model_name_or_path
else:
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Any = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase__ : Optional[Any] = os.path.join(training_args.output_dir ,'train_results.txt' )
if trainer.is_world_process_zero():
with open(_snake_case ,'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,'trainer_state.json' ) )
# Evaluation
UpperCAmelCase__ : str = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCAmelCase__ : Dict = trainer.evaluate()
UpperCAmelCase__ : Tuple = math.exp(eval_output['eval_loss'] )
UpperCAmelCase__ : str = perplexity
UpperCAmelCase__ : List[str] = os.path.join(training_args.output_dir ,'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_snake_case ,'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def lowerCamelCase ( _snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 110 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
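# Split layout (per layer, hidden size h): the fused in-projection stacks the
# three attention projections row-wise, so rows [0:h] become the query
# weight/bias, rows [h:2h] the key, and rows [2h:3h] the value; e.g. for
# h = 768 a (2304, 768) qkv weight yields three (768, 768) matrices.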
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 0 |
from math import sqrt
def __lowerCamelCase ( lowerCamelCase__ : int = 1000000 ):
'''simple docstring'''
lowerCamelCase = 0
lowerCamelCase = 0
lowerCamelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
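# Worked check from the underlying problem: for the 6x5x3 cuboid the two
# shorter sides sum to 8 and sqrt(8**2 + 6**2) == 10.0, so its shortest
# surface path is an integer and the loop above counts it.
assert sqrt((5 + 3) ** 2 + 6 ** 2) == 10.0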
if __name__ == "__main__":
print(f"""{solution() = }""")
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Tuple = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
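# --- Illustrative sketch (added): _LazyModule above defers the torch-heavy imports
# until an attribute is first touched. A minimal standard-library stand-in for the
# same idea (class and helper names here are hypothetical, not the transformers API):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr in self._attr_to_module:
            submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
            return getattr(submodule, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")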
| 50 | 0 |
def or_gate( input_a : int , input_b : int ) -> int:
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 592 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCamelCase__ (PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase = 'codegen'
_UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self ,vocab_size=5_04_00 ,n_positions=20_48 ,n_ctx=20_48 ,n_embd=40_96 ,n_layer=28 ,n_head=16 ,rotary_dim=64 ,n_inner=None ,activation_function="gelu_new" ,resid_pdrop=0.0 ,embd_pdrop=0.0 ,attn_pdrop=0.0 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=5_02_56 ,eos_token_id=5_02_56 ,tie_word_embeddings=False ,**kwargs ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,tie_word_embeddings=tie_word_embeddings ,**kwargs )
class UpperCamelCase__ (OnnxConfigWithPast ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase__ = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    def generate_dummy_inputs( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        common_inputs = super(OnnxConfigWithPast ,self ).generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
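# --- Worked example (added): generate_dummy_inputs above pads dummy past_key_values
# to (batch, num_heads, seq_len + 2, hidden_size // num_heads). Checking that shape
# arithmetic standalone, assuming the 350M CodeGen sizes (n_embd=1024, n_head=16,
# n_layer=20 -- treat these as an assumption; read them from the config in practice).
# Requires torch, which this config module otherwise only imports lazily:
import torch

batch, seqlen = 2, 8
n_head, n_embd, n_layer = 16, 1024, 20
past_key_values_length = seqlen + 2
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
dummy_past = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
assert dummy_past[0][0].shape == (2, 16, 10, 64)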
| 50 | 0 |
'''simple docstring'''
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow console output
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 536 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( ProcessorMixin ):
'''simple docstring'''
lowerCAmelCase_ = ['''image_processor''', '''tokenizer''']
lowerCAmelCase_ = '''ViTImageProcessor'''
lowerCAmelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
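# --- Usage sketch (added): the processor above pairs a ViT image processor with a
# CLIP tokenizer and routes text / images / visual prompts to the right component.
# Hypothetical usage (checkpoint name is an assumption; network access required):
#
#   from PIL import Image
#   from transformers import CLIPTokenizer, ViTImageProcessor
#   processor = UpperCAmelCase(image_processor=ViTImageProcessor(),
#                              tokenizer=CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32"))
#   inputs = processor(text="a photo of a cat", images=Image.new("RGB", (224, 224)))
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor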
| 376 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert ( src_path : str , map_location : str = "cpu" , save_path : Union[str, None] = None ):
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
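# --- Usage sketch (added): fire.Fire(convert) above also exposes the function as a
# CLI, e.g. `python convert_to_fp16.py --src_path pytorch_model.bin` (script name is
# hypothetical). Calling the function directly on a throwaway state dict:
if __name__ == "__main__":
    import os
    import tempfile

    tmp_path = os.path.join(tempfile.mkdtemp() , "tiny.bin" )
    torch.save({"w": torch.ones(2 , 2 )} , tmp_path )
    convert(tmp_path )  # save_path omitted, so tiny.bin is overwritten in place
    assert torch.load(tmp_path )["w"].dtype == torch.float16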
| 50 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def create_rename_keys ( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v ( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
SCREAMING_SNAKE_CASE__ = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = 31_29
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = '''vqa2-id2label.json'''
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = ViltForQuestionAnswering(__lowerCAmelCase )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = {0: '''False''', 1: '''True'''}
SCREAMING_SNAKE_CASE__ = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = ViltForImagesAndTextClassification(__lowerCAmelCase )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = ViltForImageAndTextRetrieval(__lowerCAmelCase )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = ViltForMaskedLM(__lowerCAmelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )['''state_dict''']
SCREAMING_SNAKE_CASE__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowerCAmelCase )
# Define processor
SCREAMING_SNAKE_CASE__ = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
SCREAMING_SNAKE_CASE__ = ViltProcessor(__lowerCAmelCase , __lowerCAmelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowerCAmelCase ).raw )
SCREAMING_SNAKE_CASE__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowerCAmelCase ).raw )
SCREAMING_SNAKE_CASE__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
SCREAMING_SNAKE_CASE__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowerCAmelCase ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
SCREAMING_SNAKE_CASE__ = '''How many cats are there?'''
SCREAMING_SNAKE_CASE__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = model(**__lowerCAmelCase )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowerCAmelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
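# --- Worked example (added): read_in_q_k_v above slices one fused qkv projection
# into query/key/value blocks. The same slicing on a toy tensor with hidden_size=4,
# showing the three slices tile the fused matrix exactly:
hidden_size = 4
fused_qkv = torch.arange(3 * hidden_size * hidden_size ).reshape(3 * hidden_size , hidden_size )
q = fused_qkv[:hidden_size, :]
k = fused_qkv[hidden_size : hidden_size * 2, :]
v = fused_qkv[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v] , dim=0 ) , fused_qkv )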
| 493 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels( ):
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 50 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ (self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=_lowerCAmelCase , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = OpenLlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
UpperCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = True
UpperCamelCase__ = OpenLlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
UpperCamelCase__ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = OpenLlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = OpenLlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
UpperCamelCase__ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
UpperCamelCase__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["""hidden_states"""][0]
UpperCamelCase__ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def UpperCAmelCase_ (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __A( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OpenLlamaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = """single_label_classification"""
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = """multi_label_classification"""
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCamelCase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase_ (self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = OpenLlamaModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
UpperCamelCase__ = original_model(_lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = OpenLlamaModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
UpperCamelCase__ = scaled_model(_lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
| 513 |
'''simple docstring'''
def print_max_activities( start : list[int] , finish : list[int] ):
    n = len(finish )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
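# --- Worked run (added): with the arrays above (finish times already sorted, which
# the greedy argument requires), the pass keeps activity 0, then every j whose start
# is >= the finish of the last pick: 3>=2 -> 1, 5>=4 -> 3, 8>=7 -> 4. A returning
# variant of the same loop, checked against that trace:
def max_activities(start, finish):
    selected = [0]
    i = 0
    for j in range(1 , len(finish ) ):
        if start[j] >= finish[i]:
            selected.append(j )
            i = j
    return selected

assert max_activities([1, 3, 0, 5, 8, 5] , [2, 4, 6, 7, 9, 9] ) == [0, 1, 3, 4]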
| 50 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask( size , overlap_pixels , remove_borders=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
    mask = np.pad(mask , mode='linear_ramp' , pad_width=overlap_pixels , end_values=0 )
if "l" in remove_borders:
_SCREAMING_SNAKE_CASE =mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
_SCREAMING_SNAKE_CASE =mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
_SCREAMING_SNAKE_CASE =mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
_SCREAMING_SNAKE_CASE =mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def clamp( n , smallest , largest ):
    """simple docstring"""
    return max(smallest , min(n , largest ) )
def clamp_rect( rect , min , max ):
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect( rect , overlap , image_size ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
return rect
def squeeze_tile( tile , original_image , original_slice , slice_x ):
    """simple docstring"""
    result = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
return result
def unsqueeze_tile( tile , original_image_slice ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
return tile
def next_divisible( n , d ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , low_res_scheduler , scheduler , max_noise_level = 350 , ):
        '''simple docstring'''
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
_SCREAMING_SNAKE_CASE =add_overlap_rect(_lowerCAmelCase , _lowerCAmelCase , image.size )
_SCREAMING_SNAKE_CASE =image.crop(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_SCREAMING_SNAKE_CASE =translated_slice_x - (original_image_slice / 2)
_SCREAMING_SNAKE_CASE =max(0 , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE =squeeze_tile(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE =to_input.size
_SCREAMING_SNAKE_CASE =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
_SCREAMING_SNAKE_CASE =super(_lowerCAmelCase , self ).__call__(image=_lowerCAmelCase , **_lowerCAmelCase ).images[0]
_SCREAMING_SNAKE_CASE =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
_SCREAMING_SNAKE_CASE =unsqueeze_tile(_lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
_SCREAMING_SNAKE_CASE =[]
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
_SCREAMING_SNAKE_CASE =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=_lowerCAmelCase ) , mode='L' , )
final_image.paste(
_lowerCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , _lowerCAmelCase )
@torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps = 75 , guidance_scale = 9.0 , noise_level = 50 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , callback = None , callback_steps = 1 , tile_size = 128 , tile_border = 32 , original_image_slice = 32 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
_SCREAMING_SNAKE_CASE =math.ceil(image.size[0] / tile_size )
_SCREAMING_SNAKE_CASE =math.ceil(image.size[1] / tile_size )
_SCREAMING_SNAKE_CASE =tcx * tcy
_SCREAMING_SNAKE_CASE =0
for y in range(_lowerCAmelCase ):
for x in range(_lowerCAmelCase ):
self._process_tile(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prompt=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , noise_level=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main( ):
    """simple docstring"""
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision='fp16' , torch_dtype=torch.float16 )
    pipe = pipe.to('cuda' )
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
    def callback(obj ):
        print(f"progress: {obj['progress']:.4f}" )
        obj["image"].save('diffusers_library_progress.jpg' )
    final_image = pipe(image=image , prompt='Black font, white background, vector' , noise_level=40 , callback=callback )
    final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
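# --- Worked example (added): each tile crop above is grown by tile_border on every
# side and clamped back into the image. A simplified standalone version of that
# arithmetic (the real _process_tile also reserves original_image_slice columns),
# reusing the clamp helper defined above:
def demo_overlap_rect(x, y, tile_size, border, image_size):
    rect = [x * tile_size, y * tile_size,
            min(image_size[0] , (x + 1) * tile_size ), min(image_size[1] , (y + 1) * tile_size )]
    rect = [rect[0] - border, rect[1] - border, rect[2] + border, rect[3] + border]
    return (clamp(rect[0] , 0 , image_size[0] ), clamp(rect[1] , 0 , image_size[1] ),
            clamp(rect[2] , 0 , image_size[0] ), clamp(rect[3] , 0 , image_size[1] ))

assert demo_overlap_rect(1 , 0 , 128 , 32 , (300, 300) ) == (96, 0, 288, 160)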
| 405 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ (Trainer ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,_lowerCAmelCase ,)
super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
| 50 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=30 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=None , ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = parent
__snake_case : Dict = batch_size
__snake_case : Any = image_size
__snake_case : List[str] = patch_size
__snake_case : str = num_channels
__snake_case : Tuple = is_training
__snake_case : Union[str, Any] = use_labels
__snake_case : Union[str, Any] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Optional[int] = initializer_range
__snake_case : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case : Dict = (image_size // patch_size) ** 2
__snake_case : List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = ViTMSNModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.type_sequence_label_size
__snake_case : List[str] = ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : int = 1
__snake_case : Optional[int] = ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any =(ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ : Union[str, Any] =(
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : str =False
UpperCAmelCase_ : Union[str, Any] =False
UpperCAmelCase_ : List[Any] =False
UpperCAmelCase_ : List[str] =False
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : List[Any] = ViTMSNModelTester(self )
__snake_case : Tuple = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
pass
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(_lowerCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = ViTMSNModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase__( ) -> Optional[int]:
__snake_case : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
__snake_case : List[str] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(_lowerCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Tuple = prepare_img()
__snake_case : str = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : str = model(**_lowerCAmelCase )
# verify the logits
__snake_case : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case : Union[str, Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 243 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines ( lines : List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES: Dict = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase_ : str = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase_ : int = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ : int = False
lowercase_ : str = False
lowercase_ : str = False
lowercase_ : Union[str, Any] = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
def lowercase__ ( self : str ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Any ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowercase__ ( self : List[str] ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowercase__ ( self : Dict ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__snake_case = outputs.attentions
__snake_case = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = config.window_size**2
__snake_case = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__snake_case = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__snake_case = len(_lowerCAmelCase )
# Check attention is always last and order is fine
__snake_case = True
__snake_case = True
__snake_case = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
__snake_case = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__snake_case = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
__snake_case = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
def lowercase__ ( self : Any ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__snake_case = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__snake_case = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def lowercase__ ( self : str ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def lowercase__ ( self : Optional[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowercase__ ( self : int ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowercase__ ( self : List[Any] ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
__snake_case = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
@cached_property
def lowercase__ ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : str ):
__snake_case = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
_lowerCAmelCase )
__snake_case = self.default_image_processor
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
# verify the logits
__snake_case = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
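# ---------------------------------------------------------------------------
# Added illustration (not in the original test file): the sequence length and
# hidden size asserted in create_and_check_model above follow from the Swin
# patch embedding plus one 2x2 patch-merging step per stage after the first.
def expected_swin_output_shape(image_size=32, patch_size=2, embed_dim=16, depths=(1, 2, 1)):
    num_patches = (image_size // patch_size) ** 2          # tokens after patch embedding
    seq_len = num_patches // (4 ** (len(depths) - 1))      # each merge divides tokens by 4
    hidden_dim = int(embed_dim * 2 ** (len(depths) - 1))   # each merge doubles channels
    return seq_len, hidden_dim

assert expected_swin_output_shape() == (16, 64)  # defaults mirror SwinvaModelTester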
| 356 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
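    # Added illustration: strand_sort consumes its input in place (arr.pop),
    # so pass a copy when the caller still needs the original ordering.
    data = [5, 1, 4, 2, 3]
    assert strand_sort(list(data)) == [1, 2, 3, 4, 5]
    assert data == [5, 1, 4, 2, 3]  # untouched, because a copy was sorted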
| 50 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # spherical linear interpolation between two latent/embedding vectors
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: plain lerp is numerically safer
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
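# Quick numeric check (added illustration): slerp recovers its endpoints at
# t=0 and t=1 for a pair of orthogonal unit vectors.
_va = np.array([1.0, 0.0])
_vb = np.array([0.0, 1.0])
assert np.allclose(slerp(0.0, _va, _vb), _va)
assert np.allclose(slerp(1.0, _va, _vb), _vb)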
def spherical_dist_loss(x, y):
    # squared geodesic distance between the directions of x and y
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
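# Added illustration: identical directions give zero distance, opposite
# directions give the maximum value of pi**2 / 2.
_unit = torch.tensor([[1.0, 0.0]])
assert spherical_dist_loss(_unit, _unit).item() == 0.0
assert abs(spherical_dist_loss(_unit, -_unit).item() - np.pi ** 2 / 2) < 1e-4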
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
'''simple docstring'''
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size['shortest_edge']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] = "auto" ) -> str:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__magic_name__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
self.enable_attention_slicing(_lowerCAmelCase )
    def freeze_vae(self):
        set_requires_grad(self.vae, False)
    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)
    def freeze_unet(self):
        set_requires_grad(self.unet, False)
    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
            generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(self, style_image, content_image, style_prompt=None, content_prompt=None, height=512, width=512, noise_strength=0.6, num_inference_steps=50, guidance_scale=7.5, batch_size=1, eta=0.0, clip_guidance_scale=100, generator=None, output_type="pil", return_dict=True, slerp_latent_style_strength=0.8, slerp_prompt_style_strength=0.1, slerp_clip_image_style_strength=0.1):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(_lowerCAmelCase , torch.Generator ) and batch_size > 1:
__magic_name__ : Dict = [generator] + [None] * (batch_size - 1)
__magic_name__ : str = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__magic_name__ : Dict = [x[0] for x in coca_is_none if x[1]]
__magic_name__ : Any = ', '.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__magic_name__ : Dict = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__magic_name__ : int = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
__magic_name__ : Tuple = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
__magic_name__ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__magic_name__ : str = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
__magic_name__ : str = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__magic_name__ : List[str] = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
__magic_name__ : List[Any] = text_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# set timesteps
__magic_name__ : List[Any] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__magic_name__ : Union[str, Any] = {}
if accepts_offset:
__magic_name__ : List[Any] = 1
self.scheduler.set_timesteps(_lowerCAmelCase , **_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__magic_name__ , __magic_name__ : int = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
__magic_name__ : str = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
__magic_name__ : Optional[int] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__magic_name__ : str = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__magic_name__ : Optional[Any] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__magic_name__ : List[str] = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__magic_name__ : Dict = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if clip_guidance_scale > 0:
__magic_name__ : List[str] = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__magic_name__ : int = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__magic_name__ : List[str] = slerp(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__magic_name__ : List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__magic_name__ : str = content_text_input.input_ids.shape[-1]
__magic_name__ : int = self.tokenizer([''] , padding='max_length' , max_length=_lowerCAmelCase , return_tensors='pt' )
__magic_name__ : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__magic_name__ : Optional[Any] = uncond_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__magic_name__ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__magic_name__ : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__magic_name__ : List[Any] = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device='cpu' , dtype=_lowerCAmelCase ).to(
self.device )
else:
__magic_name__ : Tuple = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__magic_name__ : Dict = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__magic_name__ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__magic_name__ : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__magic_name__ : Optional[int] = {}
if accepts_eta:
__magic_name__ : Any = eta
# check if the scheduler accepts generator
__magic_name__ : Any = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__magic_name__ : List[Any] = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Optional[Any] = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__magic_name__ : List[str] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : List[Any] = noise_pred.chunk(2 )
__magic_name__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__magic_name__ : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__magic_name__ , __magic_name__ : Optional[int] = self.cond_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : List[str] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__magic_name__ : Optional[Any] = 1 / 0.1_8215 * latents
__magic_name__ : str = self.vae.decode(_lowerCAmelCase ).sample
__magic_name__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : List[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
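# ---------------------------------------------------------------------------
# Hedged wiring sketch (added illustration): how a pipeline like the one above
# is typically assembled. The checkpoint ids and the open_clip calls are
# assumptions, not taken from this file; adapt them to locally available
# weights before running.
def load_images_mixing_pipeline():
    import open_clip  # assumed dependency for the CoCa captioner
    clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
    coca_model, _, coca_transform = open_clip.create_model_and_transforms("coca_ViT-L-14")
    coca_tokenizer = open_clip.get_tokenizer("coca_ViT-L-14")
    return DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="clip_guided_images_mixing_stable_diffusion",
        clip_model=clip_model,
        feature_extractor=feature_extractor,
        coca_model=coca_model,
        coca_tokenizer=coca_tokenizer,
        coca_transform=coca_transform,
    )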
| 561 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # split the fetched dataset dict into features and regression target
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    # the California housing dataset ships with scikit-learn
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    main()
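    # Added illustration: the xgboost() helper above is not tied to the
    # housing data; a two-sample toy fit shows the (n, 1) prediction shape.
    toy_predictions = xgboost(np.array([[0.0], [1.0]]), np.array([0.0, 1.0]), np.array([[0.5]]))
    assert toy_predictions.shape == (1, 1)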
| 50 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
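# Hedged usage sketch (added illustration): each auto class above dispatches
# on the checkpoint's config type, so one call resolves the architecture.
# The checkpoint id is an assumption.
def load_flax_classifier(checkpoint='distilbert-base-uncased'):
    # resolves to FlaxDistilBertForSequenceClassification per the mapping above
    return FlaxAutoModelForSequenceClassification.from_pretrained(checkpoint)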
| 173 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
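# A compact end-to-end sketch (an addition; it relies on the jnp and Flax
# processor imports at the top of this test module): FlaxLogitsProcessorList
# applies the warpers exercised above in list order.
def _demo_warper_chain():
    demo_input_ids = jnp.ones((1, 4), dtype="i4")
    demo_scores = jnp.log(jnp.full((1, 20), 1 / 20))
    chain = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(5), FlaxTopPLogitsWarper(0.95)]
    )
    return chain(demo_input_ids, demo_scores, cur_len=4)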
| 50 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
lowerCamelCase = DisjunctiveConstraint(_lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCAmelCase ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(_lowerCAmelCase ) # fails here
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase = DisjunctiveConstraint(_lowerCAmelCase )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(1 )
lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(2 )
lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(3 )
lowerCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCamelCase = DisjunctiveConstraint(_lowerCAmelCase )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase , lowerCamelCase , lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
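# A compact usage sketch (an addition): feed candidate token ids one at a
# time; `completed` flips to True as soon as any branch of the disjunction
# has been fully matched, mirroring the progressions tested above.
def _demo_disjunctive_constraint():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    assert completed and dc.current_seq == [1, 2, 4]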
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
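# Standalone illustration (an addition, using only the standard library) of
# the lazy pattern above: attribute access, not the import statement itself,
# triggers the real module load.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails.
        for module_name, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(module_name)
                return getattr(module, attr)
        raise AttributeError(attr)
# e.g. _TinyLazyModule("demo", {"json": ["loads"]}).loads("[1, 2]") == [1, 2]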
| 50 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset bunch into (features, target).
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 592 |
'''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    # The XOR of two ints is negative exactly when their sign bits differ.
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
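    # Worked illustration (an addition): in two's complement, XOR propagates
    # the sign bit, so the result is negative iff the operands' signs differ.
    assert different_signs(3, -4)      # 3 ^ -4 == -1 -> negative -> different
    assert not different_signs(3, 4)   # 3 ^ 4 == 7 -> non-negative -> same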
| 50 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a_ = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a_ = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
a_ = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a_ = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
a_ = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
a_ = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
def _a ( self ):
"""simple docstring"""
a_ = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a_ = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
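# A direct (non-test) usage sketch (an addition): given a repo file listing,
# the helper reports whether every PyTorch weight has a safetensors twin,
# exactly as the cases above establish.
_demo_filenames = [
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]
assert is_safetensors_compatible(_demo_filenames)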
| 536 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[str] , __lowercase : Tuple , __lowercase : Dict ):
"""simple docstring"""
snake_case_ = jnp.ones((batch_size, length) ) / length
return scores
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = None
snake_case_ = 20
snake_case_ = self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
snake_case_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
snake_case_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
snake_case_ = jax.nn.softmax(_lowerCAmelCase , axis=-1 )
snake_case_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
snake_case_ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
snake_case_ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = None
snake_case_ = 10
snake_case_ = 2
# create ramp distribution
snake_case_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
snake_case_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
snake_case_ = FlaxTopKLogitsWarper(3 )
snake_case_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
snake_case_ = 5
snake_case_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
snake_case_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
snake_case_ = top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = None
snake_case_ = 10
snake_case_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
snake_case_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
snake_case_ = FlaxTopPLogitsWarper(0.8 )
snake_case_ = np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
snake_case_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
snake_case_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
snake_case_ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
snake_case_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
snake_case_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = 20
snake_case_ = 4
snake_case_ = 0
snake_case_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
snake_case_ = ids_tensor((batch_size, 20) , vocab_size=20 )
snake_case_ = 5
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = 15
snake_case_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = 20
snake_case_ = 4
snake_case_ = 0
snake_case_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
snake_case_ = ids_tensor((batch_size, 1) , vocab_size=20 )
snake_case_ = 1
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
snake_case_ = 3
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = 20
snake_case_ = 4
snake_case_ = 0
snake_case_ = 5
snake_case_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
snake_case_ = ids_tensor((batch_size, 4) , vocab_size=20 )
snake_case_ = 4
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
snake_case_ = 3
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = 4
snake_case_ = 10
snake_case_ = 15
snake_case_ = 2
snake_case_ = 1
snake_case_ = 15
# dummy input_ids and scores
snake_case_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
snake_case_ = input_ids.copy()
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = scores.copy()
# instantiate all dist processors
snake_case_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case_ = FlaxTopKLogitsWarper(3 )
snake_case_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
snake_case_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
snake_case_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
snake_case_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
snake_case_ = 10
# no processor list
snake_case_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
snake_case_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = 4
snake_case_ = 10
snake_case_ = 15
snake_case_ = 2
snake_case_ = 1
snake_case_ = 15
# dummy input_ids and scores
snake_case_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
snake_case_ = input_ids.copy()
snake_case_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = scores.copy()
# instantiate all dist processors
snake_case_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case_ = FlaxTopKLogitsWarper(3 )
snake_case_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
snake_case_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
snake_case_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
snake_case_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
snake_case_ = 10
# no processor list
def run_no_processor_list(__lowercase : Any , __lowercase : str , __lowercase : List[str] ):
snake_case_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
snake_case_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(__lowercase : int , __lowercase : Optional[int] , __lowercase : str ):
snake_case_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
snake_case_ = jax.jit(_lowerCAmelCase )
snake_case_ = jax.jit(_lowerCAmelCase )
snake_case_ = jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case_ = jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 376 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # keep TensorFlow console output quiet
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
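# Optional extension (an addition, not part of the original script): collect
# the same report into a dict, e.g. to attach to a bug report programmatically.
def collect_env_info():
    info = {'python': sys.version, 'transformers': transformers.__version__}
    try:
        import torch
        info['torch'] = torch.__version__
        info['cuda_available'] = torch.cuda.is_available()
    except ImportError:
        info['torch'] = None
    return info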
| 50 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
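# A minimal usage sketch (an addition; assumes PyTorch is installed) for one
# of the collators re-exported above: default_data_collator stacks
# same-length per-example features into batched tensors and renames
# "label" to "labels".
def _demo_default_collator():
    features = [{"input_ids": [0, 1, 2], "label": 0}, {"input_ids": [3, 4, 5], "label": 1}]
    batch = default_data_collator(features)
    return batch["input_ids"].shape  # torch.Size([2, 3])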
| 493 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
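# A minimal instantiation sketch (an addition): override a few fields and
# rely on the defaults for the rest; attribute_map exposes the HF-standard
# aliases defined above.
_demo_config = GPTBigCodeConfig(n_embd=1024, n_layer=24, n_head=16)
assert _demo_config.hidden_size == 1024      # alias for n_embd
assert _demo_config.num_hidden_layers == 24  # alias for n_layer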
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 513 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # Shift each channel value by `level`, anchored at mid-gray (128).
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
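    # Self-contained check (an addition; avoids needing lena.jpg): on a
    # uniform gray image, raising brightness by 50 shifts pixels 100 -> 150.
    gray = Image.new('L', (2, 2), color=100)
    brighter = change_brightness(gray, 50)
    assert list(brighter.getdata()) == [150] * 4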
| 50 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 405 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in non-increasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
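# Quick sanity illustration (an addition): the binary search returns the
# index of the first negative entry in a non-increasing row, or the row
# length when there are no negatives.
assert find_negative_index([4, 3, 1, -1, -3]) == 3
assert find_negative_index([1, 0]) == 2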
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
break
return total
def benchmark() -> None:
from timeit import timeit
print("""Running benchmarks""" )
    setup = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 50 | 0 |
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # Subtractive notation: a smaller symbol before a larger one subtracts.
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
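    # Round-trip illustration (an addition): re-encoding a wasteful numeral
    # in minimal form saves characters, which is what solution() totals up.
    assert parse_roman_numerals("XIIIIII") == 16
    assert generate_roman_numerals(16) == "XVI"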
| 243 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase )
lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : str ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ = """1. Want to contribute a new model?"""
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Find the start of the list.
lowerCamelCase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCAmelCase )
def A__ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Union[str, Any]=False ):
lowerCamelCase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ = default_version.base_version
elif patch:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def A__ ( ):
lowerCamelCase__ = get_version()
lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
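# Quick illustrative check of the "init" pattern above (an addition, not part
# of the release flow): it rewrites a version assignment in place.
_sample = '__version__ = "4.30.0.dev0"\n'
_re_pat, _template = REPLACE_PATTERNS['init']
assert _re_pat.sub(_template.replace('VERSION', '4.30.0'), _sample) == '__version__ = "4.30.0"\n'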
| 50 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = None , __lowerCAmelCase : Optional[int] = None ):
__snake_case = None
__snake_case = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
__snake_case = os.path.abspath('examples' )
for item in os.listdir(_lowerCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
__snake_case = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=_lowerCAmelCase , feature_script=_lowerCAmelCase , tested_section='main()' if parser_only else 'training_function()' , ):
__snake_case = compare_against_test(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case = '\n'.join(_lowerCAmelCase )
if special_strings is not None:
for string in special_strings:
__snake_case = diff.replace(_lowerCAmelCase , '' )
self.assertEqual(_lowerCAmelCase , '' )
def lowercase__ ( self : Optional[int] ):
self.one_complete_example('complete_nlp_example.py' , _lowerCAmelCase )
self.one_complete_example('complete_nlp_example.py' , _lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
__snake_case = [
' ' * 1_6 + '{\n\n',
' ' * 2_0 + '\"accuracy\": eval_metric[\"accuracy\"],\n\n',
' ' * 2_0 + '\"f1\": eval_metric[\"f1\"],\n\n',
' ' * 2_0 + '\"train_loss\": total_loss.item() / len(train_dataloader),\n\n',
' ' * 2_0 + '\"epoch\": epoch,\n\n',
' ' * 1_6 + '},\n\n',
' ' * 1_6 + 'step=epoch,\n',
' ' * 1_2,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.one_complete_example('complete_cv_example.py' , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class FeatureExamplesTests(TempDirTestCase):
lowercase_ : Tuple = False
@classmethod
def lowercase__ ( cls : Optional[int] ):
super().setUpClass()
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__snake_case = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowercase__ ( cls : List[str] ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowercase__ ( self : int ):
__snake_case = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def lowercase__ ( self : Optional[int] ):
__snake_case = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__snake_case = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def lowercase__ ( self : List[Any] ):
__snake_case = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__snake_case = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
self.assertNotIn('epoch 0:' , _lowerCAmelCase )
self.assertIn('epoch 1:' , _lowerCAmelCase )
def lowercase__ ( self : Optional[Any] ):
__snake_case = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__snake_case = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
if torch.cuda.is_available():
__snake_case = torch.cuda.device_count()
else:
__snake_case = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , _lowerCAmelCase )
self.assertIn('epoch 1:' , _lowerCAmelCase )
else:
self.assertIn('epoch 0:' , _lowerCAmelCase )
self.assertIn('epoch 1:' , _lowerCAmelCase )
@slow
def lowercase__ ( self : List[str] ):
__snake_case = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
__snake_case = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
__snake_case = re.findall('({.+})' , _lowerCAmelCase )
__snake_case = [r for r in results if 'accuracy' in r][-1]
__snake_case = ast.literal_eval(_lowerCAmelCase )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
__snake_case = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'tracking' ) ) )
def lowercase__ ( self : int ):
__snake_case = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def lowercase__ ( self : Dict ):
__snake_case = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
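# Pure-Python illustration (an addition; the id values are hypothetical) of
# the token-type layout built above: [CLS] A [SEP] gets 0s, then B [SEP] gets 1s.
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7592], [2129, 2024]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 1, 1, 1]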
| 50 | 0 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
"""simple docstring"""
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
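    # Bit-trick illustration (an addition): n & (n - 1) clears the lowest set
    # bit, so it is zero only when n has at most one bit set.
    assert is_power_of_two(16)      # 0b10000 & 0b01111 == 0
    assert not is_power_of_two(12)  # 0b1100  & 0b1011  == 0b1000
    assert is_power_of_two(0)       # edge case: 0 is accepted here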
| 561 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def is_chinese(word: str) -> int:
# word like '180' or '身高' or '神'
for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
return 0
return 1
def get_chinese_word(tokens: list):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            # Try the longest whole-word match first.
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Optional[int] ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
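# Editor's sketch (not part of the original file): a plain-named re-implementation
# of the greedy `add_sub_symbol` marking above; names and the driver line are the
# editor's. The source additionally requires the start token to be a Chinese char.
def mark_whole_words(bert_tokens, chinese_words):
    if not chinese_words:
        return bert_tokens
    max_len = max(len(w) for w in chinese_words)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        matched = False
        # Try the longest candidate span first, exactly as the loop above does.
        for i in range(min(end - start, max_len), 1, -1):
            if "".join(tokens[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens


print(mark_whole_words(["身", "高", "1", "8", "0"], {"身高"}))  # ['身', '##高', '1', '8', '0']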
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
def A__ ( A : int):
'''simple docstring'''
UpperCamelCase : List[Any] = 2
UpperCamelCase : Optional[Any] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCAmelCase)
if n > 1:
factors.append(__lowerCAmelCase)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
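# Editor's sketch (not part of the original file): the same trial-division
# factorization with descriptive names; `prime_factors` is the editor's name.
def prime_factors(n: int) -> list[int]:
    factors = []
    i = 2
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains above sqrt(n) is itself prime
    return factors


print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]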
| 173 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
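# Editor's sketch (not part of the original file): what read_in_q_k_v above does to
# each fused attention projection; sizes here are toy values, not the DPT config's.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)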
| 50 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowercase ( a_ ):
"""simple docstring"""
def __A ( self , A ) -> List[Any]:
'''simple docstring'''
return 0.0
def __lowerCamelCase ( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowerCamelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __lowerCamelCase ( lowerCamelCase__ : FilterType , lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = 512
lowerCamelCase = [1] + [0] * (size - 1)
lowerCamelCase = [filter_type.process(__lowerCAmelCase ) for item in inputs]
lowerCamelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase = np.abs(np.fft.fft(__lowerCAmelCase ) )
lowerCamelCase = 20 * np.logaa(__lowerCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
lowerCamelCase = get_bounds(__lowerCAmelCase , __lowerCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(__lowerCAmelCase )
plt.show()
def __lowerCamelCase ( lowerCamelCase__ : FilterType , lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = 512
lowerCamelCase = [1] + [0] * (size - 1)
lowerCamelCase = [filter_type.process(__lowerCAmelCase ) for item in inputs]
lowerCamelCase = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase = np.angle(np.fft.fft(__lowerCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__lowerCAmelCase , -2 * pi ) )
plt.show()
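# Editor's sketch (not part of the original file): a minimal filter satisfying the
# `process(sample) -> float` protocol the two plotting helpers above expect; the
# class name and alpha default are the editor's choices.
class SimpleLowPass:
    """First-order IIR low-pass: y[n] = alpha * x[n] + (1 - alpha) * y[n-1]."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self._prev = 0.0

    def process(self, sample: float) -> float:
        self._prev = self.alpha * sample + (1 - self.alpha) * self._prev
        return self._prev


# Passing SimpleLowPass() plus a sample rate (e.g. 48000) to either helper above
# plots its gain or phase response from the FFT of its impulse response.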
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Tuple = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
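# Editor's sketch (not part of the original file): the lazy-import idea behind
# _LazyModule, reduced to its core; illustrative, not the Hugging Face implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that actually defines it.
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        # Import the defining submodule only on first attribute access.
        module = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(module, attr)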
| 50 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = 'philschmid/bart-large-cnn-samsum'
A_ : List[str] = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
A_ : Optional[Any] = 'summarizer'
A_ : Optional[int] = AutoTokenizer
A_ : List[Any] = AutoModelForSeqaSeqLM
A_ : Any = ['text']
A_ : Union[str, Any] = ['text']
def a (self : List[str] , a__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor(_lowerCAmelCase , return_tensors='''pt''' , truncation=_lowerCAmelCase )
def a (self : Union[str, Any] , a__ : Dict ):
"""simple docstring"""
return self.model.generate(**_lowerCAmelCase )[0]
def a (self : str , a__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
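# Editor's sketch (not part of the original file): the encode -> forward -> decode
# methods above correspond to this plain transformers flow (downloads the weights;
# the checkpoint name comes from the tool definition above).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("philschmid/bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("philschmid/bart-large-cnn-samsum")
inputs = tokenizer("A long English dialogue to summarize ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))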
| 592 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'codegen'
_UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self ,_lowerCAmelCase=50400 ,_lowerCAmelCase=2048 ,_lowerCAmelCase=2048 ,_lowerCAmelCase=4096 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=50256 ,_lowerCAmelCase=50256 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_ctx
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase_ ( self ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self ):
return self._config.n_head
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
return 13
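# Editor's sketch (not part of the original file): the past_key_values shape built
# in generate_dummy_inputs above, with toy batch/sequence sizes and the config's
# default head/embedding dimensions.
batch, seqlen = 2, 5
n_head, n_embd = 16, 4096
past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
print(past_shape)  # (2, 16, 7, 256)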
| 50 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __UpperCamelCase ( lowercase_ : Tuple , lowercase_ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Dict=0 ):
"""simple docstring"""
a_ = []
for old_item in old_list:
a_ = old_item.replace('in_layers.0' , 'norm1' )
a_ = new_item.replace('in_layers.2' , 'conv1' )
a_ = new_item.replace('out_layers.0' , 'norm2' )
a_ = new_item.replace('out_layers.3' , 'conv2' )
a_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
a_ = new_item.replace('skip_connection' , 'conv_shortcut' )
a_ = shave_segments(__lowerCAmelCase , n_shave_prefix_segments=__lowerCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __UpperCamelCase ( lowercase_ : Tuple , lowercase_ : List[str]=0 ):
"""simple docstring"""
a_ = []
for old_item in old_list:
a_ = old_item
a_ = new_item.replace('norm.weight' , 'group_norm.weight' )
a_ = new_item.replace('norm.bias' , 'group_norm.bias' )
a_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
a_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
a_ = shave_segments(__lowerCAmelCase , n_shave_prefix_segments=__lowerCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[str]=None , lowercase_ : str=None , lowercase_ : Tuple=None ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
a_ = old_checkpoint[path]
a_ = old_tensor.shape[0] // 3
a_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
a_ = old_tensor.shape[0] // config['num_head_channels'] // 3
a_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
a_ , a_ , a_ = old_tensor.split(channels // num_heads , dim=1 )
a_ = query.reshape(__lowerCAmelCase )
a_ = key.reshape(__lowerCAmelCase )
a_ = value.reshape(__lowerCAmelCase )
for path in paths:
a_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
a_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
a_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
a_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
a_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
a_ = old_checkpoint[path['old']][:, :, 0]
else:
a_ = old_checkpoint[path['old']]
def __UpperCamelCase ( lowercase_ : Optional[Any] , lowercase_ : List[str] ):
"""simple docstring"""
a_ = {}
a_ = checkpoint['time_embed.0.weight']
a_ = checkpoint['time_embed.0.bias']
a_ = checkpoint['time_embed.2.weight']
a_ = checkpoint['time_embed.2.bias']
a_ = checkpoint['input_blocks.0.0.weight']
a_ = checkpoint['input_blocks.0.0.bias']
a_ = checkpoint['out.0.weight']
a_ = checkpoint['out.0.bias']
a_ = checkpoint['out.2.weight']
a_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
a_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
a_ = {
layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
for layer_id in range(__lowerCAmelCase )
}
# Retrieves the keys for the middle blocks only
a_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
a_ = {
layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
for layer_id in range(__lowerCAmelCase )
}
# Retrieves the keys for the output blocks only
a_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
a_ = {
layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
for layer_id in range(__lowerCAmelCase )
}
for i in range(1 , __lowerCAmelCase ):
a_ = (i - 1) // (config['num_res_blocks'] + 1)
a_ = (i - 1) % (config['num_res_blocks'] + 1)
a_ = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
a_ = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
if F'input_blocks.{i}.0.op.weight' in checkpoint:
a_ = checkpoint[
F'input_blocks.{i}.0.op.weight'
]
a_ = checkpoint[
F'input_blocks.{i}.0.op.bias'
]
continue
a_ = renew_resnet_paths(__lowerCAmelCase )
a_ = {'old': F'input_blocks.{i}.0', 'new': F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
a_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path, resnet_op] , config=__lowerCAmelCase )
if len(__lowerCAmelCase ):
a_ = renew_attention_paths(__lowerCAmelCase )
a_ = {
'old': F'input_blocks.{i}.1',
'new': F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
a_ = {
F'input_blocks.{i}.1.qkv.bias': {
'key': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'input_blocks.{i}.1.qkv.weight': {
'key': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=__lowerCAmelCase , config=__lowerCAmelCase , )
a_ = middle_blocks[0]
a_ = middle_blocks[1]
a_ = middle_blocks[2]
a_ = renew_resnet_paths(__lowerCAmelCase )
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase )
a_ = renew_resnet_paths(__lowerCAmelCase )
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase )
a_ = renew_attention_paths(__lowerCAmelCase )
a_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , attention_paths_to_split=__lowerCAmelCase , config=__lowerCAmelCase )
for i in range(__lowerCAmelCase ):
a_ = i // (config['num_res_blocks'] + 1)
a_ = i % (config['num_res_blocks'] + 1)
a_ = [shave_segments(__lowerCAmelCase , 2 ) for name in output_blocks[i]]
a_ = {}
for layer in output_block_layers:
a_ , a_ = layer.split('.' )[0], shave_segments(__lowerCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__lowerCAmelCase )
else:
a_ = [layer_name]
if len(__lowerCAmelCase ) > 1:
a_ = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
a_ = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
a_ = renew_resnet_paths(__lowerCAmelCase )
a_ = renew_resnet_paths(__lowerCAmelCase )
a_ = {'old': F'output_blocks.{i}.0', 'new': F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
a_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
a_ = checkpoint[
F'output_blocks.{i}.{index}.conv.weight'
]
a_ = checkpoint[
F'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(__lowerCAmelCase ) == 2:
a_ = []
if len(__lowerCAmelCase ):
a_ = renew_attention_paths(__lowerCAmelCase )
a_ = {
'old': F'output_blocks.{i}.1',
'new': F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
a_ = {
F'output_blocks.{i}.1.qkv.bias': {
'key': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'output_blocks.{i}.1.qkv.weight': {
'key': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=__lowerCAmelCase , )
else:
a_ = renew_resnet_paths(__lowerCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
a_ = '.'.join(['output_blocks', str(__lowerCAmelCase ), path['old']] )
a_ = '.'.join(['up_blocks', str(__lowerCAmelCase ), 'resnets', str(__lowerCAmelCase ), path['new']] )
a_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
__lowerCAmelCase = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
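# Editor's sketch (not part of the original file): the "proj_attn.weight" handling
# in assign_to_checkpoint above converts a 1x1 Conv1d weight to a Linear weight by
# dropping the trailing kernel axis; the sizes below are toy values.
import torch

conv_weight = torch.randn(64, 64, 1)  # (out_channels, in_channels, kernel_size=1)
linear_weight = conv_weight[:, :, 0]  # (out_features, in_features)
print(linear_weight.shape)  # torch.Size([64, 64])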
| 536 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : int = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
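# Editor's sketch (not part of the original file): the optional-dependency gate used
# above, made self-contained; the exception and helper mirror the source names but
# this is not the transformers implementation.
class OptionalDependencyNotAvailable(Exception):
    pass


def is_torch_available() -> bool:
    try:
        import torch  # noqa: F401
    except ImportError:
        return False
    return True


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    modeling_exports = []
else:
    modeling_exports = ["XmodModel", "XmodPreTrainedModel"]

print(modeling_exports)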
| 50 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , __lowercase : List[Any] , __lowercase : Dict=13 , __lowercase : Tuple=32 , __lowercase : Any=2 , __lowercase : List[str]=3 , __lowercase : str=16 , __lowercase : List[str]=[1, 2, 1] , __lowercase : Optional[Any]=[2, 2, 4] , __lowercase : int=2 , __lowercase : Optional[int]=2.0 , __lowercase : Optional[Any]=True , __lowercase : str=0.0 , __lowercase : Union[str, Any]=0.0 , __lowercase : Dict=0.1 , __lowercase : Optional[int]="gelu" , __lowercase : Dict=False , __lowercase : Tuple=True , __lowercase : Any=0.02 , __lowercase : int=1E-5 , __lowercase : str=True , __lowercase : Optional[int]=None , __lowercase : List[Any]=True , __lowercase : str=10 , __lowercase : int=8 , __lowercase : str=["stage1", "stage2", "stage3"] , __lowercase : Dict=[1, 2, 3] , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
snake_case_ = out_features
snake_case_ = out_indices
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Dict ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Tuple , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : str ):
"""simple docstring"""
snake_case_ = MaskFormerSwinModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(_lowerCAmelCase )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = MaskFormerSwinBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCAmelCase ):
snake_case_ = ["stem"]
snake_case_ = MaskFormerSwinBackbone(config=_lowerCAmelCase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = MaskFormerSwinModelTester(self )
snake_case_ = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def snake_case__ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Tuple ):
"""simple docstring"""
return
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
pass
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_lowerCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def snake_case__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def snake_case__ ( self : Any ):
"""simple docstring"""
pass
def snake_case__ ( self : int , __lowercase : int , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ):
"""simple docstring"""
snake_case_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swin has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self : int ):
"""simple docstring"""
pass
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t : List[Any] ):
            # NaN is the only value not equal to itself, so this zeroes NaNs in place.
            t[t != t] = 0
            return t
def check_equivalence(__lowercase : Tuple , __lowercase : Dict , __lowercase : List[Any] , __lowercase : Optional[Any]={} ):
with torch.no_grad():
snake_case_ = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
snake_case_ = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(__lowercase : Dict , __lowercase : Tuple ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has"
f" `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}."
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
snake_case_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"output_hidden_states": True} )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
snake_case_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"output_hidden_states": True} )
@require_torch
class UpperCAmelCase ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = MaskFormerSwinConfig
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = MaskFormerSwinModelTester(self )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
snake_case_ = backbone_class(_lowerCAmelCase )
backbone.to(_lowerCAmelCase )
backbone.eval()
snake_case_ = backbone(**_lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case_ = backbone(**_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case_ , snake_case_ , snake_case_ = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case_ = backbone(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
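# Editor's sketch (not part of the original file): the sequence-length and hidden-dim
# arithmetic asserted in create_and_check_model above, worked with the tester's
# default toy configuration.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 256 tokens after patch embedding
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # each merge stage quarters the tokens
expected_dim = embed_dim * 2 ** (len(depths) - 1)           # and doubles the channel dim
print(expected_seq_len, expected_dim)  # 16 64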
| 376 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
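# Editor's sketch (not part of the original file): the per-tensor cast convert()
# performs above, in isolation with a stand-in state dict.
import torch

state_dict = {"weight": torch.randn(2, 2)}  # stand-in for torch.load(src_path)
state_dict = {k: v.half() for k, v in state_dict.items()}
print(state_dict["weight"].dtype)  # torch.float16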
| 50 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = 42
a = jnp.floataa
a = True
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
super().setup()
SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Tuple , *__lowerCamelCase : Tuple , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = super().__call__(*_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooler_labels ):
    '''simple docstring'''
    def cross_entropy( logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype('''f4''' )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    # mean-reduce each head, then average the three losses
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooler_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    """simple docstring"""
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    """simple docstring"""
    pad_id: int
    max_length: int = 4096 # no dynamic padding on TPUs
    def __call__( self , batch ):
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        input_ids , attention_mask = self.fetch_inputs(features['''input_ids'''] )
        batch = {
            '''input_ids''': jnp.array(input_ids , dtype=jnp.int32 ),
            '''attention_mask''': jnp.array(attention_mask , dtype=jnp.int32 ),
            '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.int32 ),
            '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.int32 ),
            '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids ):
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids ):
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
    '''simple docstring'''
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name='''batch''' )
def train_step( state , drp_rng , **model_inputs ):
    '''simple docstring'''
    def loss_fn( params ):
        start_labels = model_inputs.pop('''start_labels''' )
        end_labels = model_inputs.pop('''end_labels''' )
        pooled_labels = model_inputs.pop('''pooled_labels''' )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    grads = jax.lax.pmean(grads , '''batch''' )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def val_step( state , **model_inputs ):
    '''simple docstring'''
    start_labels = model_inputs.pop('''start_labels''' )
    end_labels = model_inputs.pop('''end_labels''' )
    pooled_labels = model_inputs.pop('''pooled_labels''' )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    return metrics
class TrainState( train_state.TrainState ):
    """simple docstring"""
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    """simple docstring"""
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                '''lr''': args.lr,
                '''init_lr''': args.init_lr,
                '''warmup_steps''': args.warmup_steps,
                '''num_train_steps''': num_train_steps,
                '''weight_decay''': args.weight_decay,
            }
            tx , lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ):
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=f'''Running EPOCH-{epoch}''' ):
                batch = self.data_collator(batch )
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics['''loss'''] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        '''step''': state_step.item(),
                        '''eval_loss''': eval_loss.item(),
                        '''tr_loss''': tr_loss,
                        '''lr''': lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=state )
    def evaluate( self , state , dataset ):
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc='''Evaluating ... ''' ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics['''loss'''] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        state = jax_utils.unreplicate(state )
        print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... ''' )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , '''opt_state.msgpack''' ) , '''wb''' ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , '''args.joblib''' ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , '''data_collator.joblib''' ) )
        with open(os.path.join(save_dir , '''training_state.json''' ) , '''w''' ) as f:
            json.dump({'''step''': state.step.item()} , f )
        print('''DONE''' )
def restore_checkpoint( save_dir , state ):
    '''simple docstring'''
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' )
    with open(os.path.join(save_dir , '''flax_model.msgpack''' ) , '''rb''' ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , '''opt_state.msgpack''' ) , '''rb''' ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , '''args.joblib''' ) )
    data_collator = joblib.load(os.path.join(save_dir , '''data_collator.joblib''' ) )
    with open(os.path.join(save_dir , '''training_state.json''' ) , '''r''' ) as f:
        training_state = json.load(f )
    step = training_state['''step''']
    print('''DONE''' )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    '''simple docstring'''
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    '''simple docstring'''
    def weight_decay_mask( params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
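# A minimal wiring sketch (assumed entry point, not part of this module):
#   model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
#   tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                     num_train_steps=..., weight_decay=args.weight_decay)
#   trainer = Trainer(args=args, data_collator=DataCollator(pad_id=..., max_length=4096),
#                     train_step_fn=train_step, val_step_fn=val_step,
#                     model_save_fn=model.save_pretrained, logger=wandb, scheduler_fn=lr)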
| 493 |
'''simple docstring'''
import os
from pathlib import Path
def A__ ( ):
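    # JIT-compiles the MultiScaleDeformableAttention CPU/CUDA kernels on first
    # use; a CUDA toolchain must be available at runtime.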
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 50 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet(self ):
torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def test_inference(self ):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10(self ):
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 513 |
'''simple docstring'''
def print_max_activities( start: list[int] , finish: list[int] ) -> None:
    n = len(finish )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
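    # For these inputs the greedy scan selects activities 0, 1, 3 and 4.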
| 50 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
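# Merges Megatron-DeepSpeed tensor-parallel BLOOM shards into a Hugging Face
# checkpoint: the weights listed below are averaged across TP ranks, all others
# are concatenated along their parallel dimension.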
lowerCamelCase : str = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
lowerCamelCase : Tuple = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping( key: str , file: str ) -> str:
"""simple docstring"""
    layer_rename_map = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' , key )[1] )
layer_number -= 3
return f"h.{layer_number}." + key
def get_dtype_size( dtype ) -> int:
    """simple docstring"""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    """simple docstring"""
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('Processing file: {}'.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '\n'
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"Save configuration file to {pytorch_config_dump_path}" )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 405 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    '''simple docstring'''
    def __init__( self ,args=None ,**kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,FutureWarning ,)
        super().__init__(args=args ,**kwargs )
| 50 | 0 |
from __future__ import annotations
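# Backtracking search for an open knight's tour: moves 1..n*n are placed so that
# consecutive numbers are a knight's move apart, undoing a move when it leads to
# a dead end.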
def get_valid_pos( position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board: list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n: int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
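# Checkpoint conversion helpers between PyTorch and Flax: kernels are
# transposed, norm/embedding parameters renamed and batch-norm statistics moved
# between the `params`/`batch_stats` collections in both directions.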
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location='cpu' )
        logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    def is_key_or_prefix_key_in_dict( key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.' ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ):
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['params']
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('.' ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key , flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model( model , flax_checkpoint_path ):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
    # import correct flax class
    flax_cls = getattr(transformers , 'Flax' + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , 'rb' ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_mean',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_var',)
        if "batch_stats" in flax_state:
            flax_key = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            flax_key = '.'.join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split('.' )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + '_g'
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + '_v'
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = '.'.join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            'Some weights of the Flax model were not used when initializing the PyTorch model'
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
            ' FlaxBertForSequenceClassification model).' )
    else:
        logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ' use it for predictions and inference.' )
    else:
        logger.warning(
            f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            'If your task is similar to the task the model of the checkpoint was trained on, '
            f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 356 |
'''simple docstring'''
import operator
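# Strand sort repeatedly pulls a sorted "strand" out of the remaining input and
# merges it into the solution until the input is exhausted.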
def strand_sort( arr: list , reverse: bool = False , solution: list | None = None ) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 561 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
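# Fits a gradient-boosted regressor on the California housing data and reports
# MAE/MSE on a 25% hold-out split.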
def data_handling( data: dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost( features: np.ndarray , target: np.ndarray , test_features: np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 50 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 173 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
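# Unit tests for the Flax logits processors and warpers: each test builds small
# synthetic score tensors and checks the transformed distributions element-wise.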
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self ,batch_size ,length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 ,length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores ,axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids ,scores.copy() ,cur_len=None ) ,axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids ,scores.copy() ,cur_len=None ) ,axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] ,(batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids ,ramp_logits ,cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] ,(batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids ,ramp_logits ,cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids ,dist ,cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist ,EXPECTED_FILTERED_DIST ,atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] ,(batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids ,ramp_logits ,cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) ,vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores_before_min_length = min_dist_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) ,vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores = logits_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores = logits_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length ,eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) ,vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores = logits_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores = logits_processor(input_ids ,scores ,cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) ,vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size ,vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length ,eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids ,scores ,cur_len=cur_len )
        scores = top_k_warp(input_ids ,scores ,cur_len=cur_len )
        scores = top_p_warp(input_ids ,scores ,cur_len=cur_len )
        scores = min_dist_proc(input_ids ,scores ,cur_len=cur_len )
        scores = bos_dist_proc(input_ids ,scores ,cur_len=cur_len )
        scores = eos_dist_proc(input_ids ,scores ,cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids ,scores_comp ,cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores ,scores_comp ,atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
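# --- Hedged usage sketch (not part of the test above): chaining Flax logits warpers.
# FlaxLogitsProcessorList and the warper classes are real transformers exports; the
# batch size, vocab size, and parameter values below are illustrative assumptions.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

input_ids = jnp.zeros((2, 1), dtype=jnp.int32)       # (batch_size, cur_len)
scores = jnp.log(jnp.ones((2, 10)) / 10)             # uniform log-probs over 10 tokens
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(top_k=3)]
)
warped = processors(input_ids, scores, cur_len=1)    # same (batch, vocab) shape as `scores`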
| 50 | 0 |
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase : Union[str, Any] = 'docs/source/en/_toctree.yml'
def __lowerCamelCase ( lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = defaultdict(__lowerCAmelCase )
lowerCamelCase = []
lowerCamelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__lowerCAmelCase )
lowerCamelCase = new_doc_list
lowerCamelCase = [key for key, value in counts.items() if value > 1]
lowerCamelCase = []
for duplicate_key in duplicates:
lowerCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__lowerCAmelCase ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
lowerCamelCase = sorted(__lowerCAmelCase , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__lowerCAmelCase ) > 1:
raise ValueError(f"""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__lowerCAmelCase )
# The overview entry stays first, followed by the alphabetically sorted docs
return overview_doc
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any]=False ):
'''simple docstring'''
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
lowerCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
lowerCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCamelCase = content[api_idx]["""sections"""]
# Then to the scheduler doc
lowerCamelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowerCamelCase = api_doc[scheduler_idx]["""sections"""]
lowerCamelCase = clean_doc_toc(__lowerCAmelCase )
lowerCamelCase = False
if new_scheduler_doc != scheduler_doc:
lowerCamelCase = True
if overwrite:
lowerCamelCase = new_scheduler_doc
if diff:
if overwrite:
lowerCamelCase = api_doc
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __lowerCamelCase ( lowerCamelCase__ : List[str]=False ):
'''simple docstring'''
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
lowerCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
lowerCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCamelCase = content[api_idx]["""sections"""]
# Then to the pipeline doc
lowerCamelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowerCamelCase = False
lowerCamelCase = api_doc[pipeline_idx]["""sections"""]
lowerCamelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowerCamelCase = pipeline_doc["""section"""]
lowerCamelCase = clean_doc_toc(__lowerCAmelCase )
if overwrite:
lowerCamelCase = new_sub_pipeline_doc
new_pipeline_docs.append(__lowerCAmelCase )
# sort overall pipeline doc
lowerCamelCase = clean_doc_toc(__lowerCAmelCase )
if new_pipeline_docs != pipeline_docs:
lowerCamelCase = True
if overwrite:
lowerCamelCase = new_pipeline_docs
if diff:
if overwrite:
lowerCamelCase = api_doc
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase : Any = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
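# --- Hedged sketch of the dedup-and-sort idea behind clean_doc_toc above, run on a
# toy table of contents; the entries are invented for illustration.
toy_toc = [
    {"local": "overview", "title": "Overview"},
    {"local": "pndm", "title": "PNDM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},  # duplicate key with an identical title: keep one
]
deduped = {doc["local"]: doc for doc in toy_toc}.values()
ordered = sorted(deduped, key=lambda d: (d["title"].lower() != "overview", d["title"].lower()))
assert [d["local"] for d in ordered] == ["overview", "ddim", "pndm"]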
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : Any = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
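# --- Generic sketch of the lazy-import pattern that `_LazyModule` provides above: heavy
# submodules are imported only on first attribute access. This is a simplified stand-in,
# not the transformers implementation.
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item: str):
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        value = getattr(module, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value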
| 50 | 0 |
import os
def lowerCamelCase__ ( snake_case_ : str = "input.txt" ) -> List[Any]:
with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file:
__snake_case = [
[int(__lowerCAmelCase ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
__snake_case = len(__lowerCAmelCase )
__snake_case = len(matrix[0] )
__snake_case = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
__snake_case = matrix[i][0]
for j in range(1 , __lowerCAmelCase ):
for i in range(__lowerCAmelCase ):
__snake_case = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCAmelCase ):
__snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
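# --- In-memory check of the three-direction DP above (step right into each column,
# then relax up/down moves within it); the 3x3 matrix is made up so the expected
# answer can be read off by hand.
def min_path_sum(matrix: list[list[int]]) -> int:
    rows = len(matrix)
    best = [row[0] for row in matrix]                         # cheapest way to reach column 0
    for j in range(1, len(matrix[0])):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # move right into column j
        for i in range(1, rows):                              # relax downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                     # relax upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

assert min_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 1]]) == 3  # straight across the middle row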
| 592 |
'''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    """
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 0 |
'''simple docstring'''
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
a_ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __UpperCamelCase ( lowercase_ : int = 5_000 ):
"""simple docstring"""
a_ = [(i * (3 * i - 1)) // 2 for i in range(1 , __lowerCAmelCase )]
for i, pentagonal_i in enumerate(__lowerCAmelCase ):
for j in range(__lowerCAmelCase , len(__lowerCAmelCase ) ):
a_ = pentagonal_nums[j]
a_ = pentagonal_i + pentagonal_j
a_ = pentagonal_j - pentagonal_i
if is_pentagonal(__lowerCAmelCase ) and is_pentagonal(__lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 536 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Tuple ):
"""simple docstring"""
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for a, b in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertAlmostEqual(_lowerCAmelCase , _lowerCAmelCase , delta=_lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowerCAmelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = None
ops.enable_eager_execution_internal()
snake_case_ = tf.config.list_physical_devices("CPU" )
if len(_lowerCAmelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
snake_case_ = tf.config.list_logical_devices(device_type="CPU" )
snake_case_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
snake_case_ = GradientAccumulator()
snake_case_ = tf.Variable([4.0, 3.0] )
snake_case_ , snake_case_ = create_optimizer(5E-5 , 10 , 5 )
snake_case_ = tf.Variable([0.0, 0.0] , trainable=_lowerCAmelCase )
def accumulate_on_replica(__lowercase : Dict ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowercase : str , __lowercase : List[str] ):
with strategy.scope():
snake_case_ = strategy.experimental_local_results(_lowerCAmelCase )
local_variables[0].assign(_lowerCAmelCase )
local_variables[1].assign(_lowerCAmelCase )
strategy.run(_lowerCAmelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowerCAmelCase )
def _check_local_values(__lowercase : List[Any] , __lowercase : Optional[int] ):
snake_case_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowerCAmelCase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _lowerCAmelCase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
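# --- Minimal single-device sketch of the accumulate-then-apply pattern the tests above
# exercise; the gradients, variable, and schedule lengths are arbitrary placeholders.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
variable = tf.Variable([0.0, 0.0])
optimizer, _ = create_optimizer(init_lr=1e-3, num_train_steps=10, num_warmup_steps=2)

for micro_batch_grad in ([1.0, 2.0], [3.0, -2.0]):   # gradients from two micro-batches
    accumulator([tf.constant(micro_batch_grad)])
optimizer.apply_gradients(zip(accumulator.gradients, [variable]))  # one optimizer step
accumulator.reset()                                   # start accumulating the next step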
| 376 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase : int = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 0 |
import math
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__lowerCAmelCase )
def UpperCAmelCase_ ( _A = 1 / 1_23_45 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 3
while True:
SCREAMING_SNAKE_CASE__ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ = int(__lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(__lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 493 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'gpt_bigcode'
_UpperCamelCase = ['past_key_values']
_UpperCamelCase = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scale_attn_weights
lowerCamelCase__ = use_cache
lowerCamelCase__ = attention_softmax_in_fpaa
lowerCamelCase__ = scale_attention_softmax_in_fpaa
lowerCamelCase__ = multi_query
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
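# --- Hedged instantiation sketch for the config above; upstream this class is exposed
# as transformers.GPTBigCodeConfig, and the overridden values here are illustrative,
# not the santacoder defaults.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(
    vocab_size=49_152,
    n_positions=2_048,
    n_embd=1_024,
    n_layer=12,
    n_head=16,
    multi_query=True,  # one shared key/value head across all query heads
)
print(config.hidden_size)  # the attribute map aliases n_embd, so this prints 1024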
| 50 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __A( __lowerCamelCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = 8
# DPR tok
UpperCamelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase__ = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCamelCase__ = os.path.join(_lowerCAmelCase , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
UpperCamelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCamelCase__ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCamelCase__ = {"""unk_token""": """<unk>"""}
UpperCamelCase__ = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCamelCase__ = os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ = os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def UpperCAmelCase_ (self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def UpperCAmelCase_ (self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def UpperCAmelCase_ (self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def UpperCAmelCase_ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_dataset()
UpperCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCamelCase__ = dataset
UpperCamelCase__ = RagRetriever(
_lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.get_dummy_dataset()
UpperCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
UpperCamelCase__ = os.path.join(self.tmpdirname , """dataset""" )
UpperCamelCase__ = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
UpperCamelCase__ = RagRetriever(
_lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCamelCase__ = RagRetriever(
_lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _lowerCAmelCase ) , )
return retriever
def UpperCAmelCase_ (self ):
UpperCamelCase__ = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase__ = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
UpperCamelCase__ = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
UpperCamelCase__ = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(_lowerCAmelCase , open(_lowerCAmelCase , """wb""" ) )
UpperCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
UpperCamelCase__ = RagRetriever(
_lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=_lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCamelCase__ = self.get_dummy_dataset()
retriever.save_pretrained(_lowerCAmelCase )
UpperCamelCase__ = RagRetriever.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=_lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=_lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_lowerCAmelCase )
UpperCamelCase__ = RagRetriever.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=_lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=_lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_lowerCAmelCase )
UpperCamelCase__ = RagRetriever.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_legacy_index_retriever()
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=_lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , _lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_lowerCAmelCase )
UpperCamelCase__ = RagRetriever.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever.retrieve(_lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ (self ):
import torch
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ = [[5, 7], [10, 11]]
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever(_lowerCAmelCase , _lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=_lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
UpperCamelCase__ = retriever(
_lowerCAmelCase , _lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=_lowerCAmelCase , return_tensors="""pt""" , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dpr_ctx_encoder_tokenizer()
UpperCamelCase__ = 1
UpperCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=_lowerCAmelCase )
retriever.set_ctx_encoder_tokenizer(_lowerCAmelCase )
UpperCamelCase__ = [[5, 7], [10, 11]]
UpperCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ = retriever(_lowerCAmelCase , _lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=_lowerCAmelCase )
self.assertEqual(
len(_lowerCAmelCase ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , _lowerCAmelCase ) # check for doc-token related keys in the dictionary.
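# --- Retrieval sketch outside the test harness, using the public facebook/rag-token-nq
# checkpoint with its small dummy index (downloads on first use and needs the `datasets`
# and `faiss` extras); the random vector stands in for a real DPR question embedding.
import numpy as np
from transformers import RagRetriever, RagTokenizer

retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
question_ids = tokenizer.question_encoder("who wrote hamlet", return_tensors="np")["input_ids"]
question_embedding = np.random.rand(1, 768).astype(np.float32)
out = retriever(question_ids, question_embedding, n_docs=2, return_tensors="pt")
print(out["context_input_ids"].shape)  # (batch_size * n_docs, sequence_length)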
| 513 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
    bright_img = change_brightness(img, 100)
    bright_img.save('image_data/lena_brightness.png', format='png')
| 50 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _a : int , _a : Tuple=7 , _a : Optional[Any]=3 , _a : Tuple=18 , _a : int=30 , _a : str=400 , _a : int=True , _a : Optional[int]=None , _a : Optional[int]=True , _a : List[Any]=None , _a : Tuple=True , _a : str=[0.5, 0.5, 0.5] , _a : Optional[Any]=[0.5, 0.5, 0.5] , _a : List[Any]=False , ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size if size is not None else {'height': 20, 'width': 20}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'height': 18, 'width': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_reduce_labels
def A ( self : int ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_SCREAMING_SNAKE_CASE =Image.open(dataset[0]['file'] )
_SCREAMING_SNAKE_CASE =Image.open(dataset[1]['file'] )
return image, map
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
_SCREAMING_SNAKE_CASE =Image.open(ds[1]['file'] )
_SCREAMING_SNAKE_CASE =Image.open(ds[2]['file'] )
_SCREAMING_SNAKE_CASE =Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = BeitImageProcessor if is_vision_available() else None
def A ( self : str ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BeitImageProcessingTester(self )
@property
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCAmelCase )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def A ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =[]
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =prepare_semantic_single_inputs()
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =prepare_semantic_batch_inputs()
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
def A ( self : int ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =prepare_semantic_single_inputs()
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
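# --- Hedged sketch of the processor outside the test class; the random image and
# segmentation map stand in for real ADE20k samples (torch is assumed for "pt" tensors).
import numpy as np
from transformers import BeitImageProcessor

processor = BeitImageProcessor(do_reduce_labels=True)
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
seg_map = np.random.randint(0, 151, (480, 640), dtype=np.uint8)  # ADE20k-style labels 0..150
encoding = processor(images=image, segmentation_maps=seg_map, return_tensors="pt")
print(encoding["pixel_values"].shape, encoding["labels"].shape)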
| 405 |
'''simple docstring'''
def A__ ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCamelCase : Dict = generate_large_matrix()
UpperCamelCase : Any = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__ ( __lowerCAmelCase : list[list[int]] ):
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCamelCase__ = (left + right) // 2
lowerCamelCase__ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCamelCase__ = mid + 1
else:
lowerCamelCase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def A__ ( __lowerCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
def A__ ( ):
from timeit import timeit
print("""Running benchmarks""" )
lowerCamelCase__ = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
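# --- Tiny worked example of the counters above, assuming the colliding `A__` definitions
# carry their intended names (as the benchmark setup string suggests). The grid is made
# up but sorted non-increasing along both rows and columns, as the functions require.
small_grid = [[4, 3, -1], [3, 2, -2], [1, -1, -3]]  # negatives: 1 + 1 + 2 = 4
assert count_negatives_brute_force(small_grid) == 4
assert count_negatives_binary_search(small_grid) == 4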
| 50 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Tuple =RobertaTokenizer
UpperCAmelCase_ : Union[str, Any] =RobertaTokenizerFast
UpperCAmelCase_ : Tuple =True
UpperCAmelCase_ : List[Any] ={"cls_token": "<s>"}
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__snake_case : Optional[int] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__snake_case : Tuple = {"unk_token": "<unk>"}
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__snake_case : int = "lower newer"
__snake_case : str = "lower newer"
return input_text, output_text
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : str = "lower newer"
__snake_case : Union[str, Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__snake_case : List[str] = tokenizer.tokenize(_lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = tokens + [tokenizer.unk_token]
__snake_case : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=_lowerCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=_lowerCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
__snake_case : int = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase )
__snake_case : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase )
__snake_case : List[Any] = tokenizer.encode(
"sequence builders" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : int = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : Any = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Optional[int] = "Encode this sequence."
__snake_case : List[str] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
__snake_case : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
__snake_case : Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing spaces after special tokens
__snake_case : Optional[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )} ) # mask token has a left space
__snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
__snake_case : str = "Encode <mask> sequence"
__snake_case : Optional[int] = "Encode <mask>sequence"
__snake_case : Dict = tokenizer.encode(_lowerCAmelCase )
__snake_case : List[str] = encoded.index(_lowerCAmelCase )
__snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : str = tokenizer.encode(_lowerCAmelCase )
__snake_case : List[str] = encoded.index(_lowerCAmelCase )
__snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : List[Any] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Optional[int] = "A, <mask> AllenNLP sentence."
__snake_case : Optional[Any] = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
__snake_case : Tuple = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__snake_case : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__snake_case : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the arguments
        # `add_prefix_space` and `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
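# --- Illustration (not part of the original test file) ---
# A minimal sketch of the offset-mapping behaviour exercised above, using the
# public "roberta-base" checkpoint. The checkpoint name is an assumption (any
# BPE tokenizer exposing add_prefix_space/trim_offsets behaves the same way),
# and the snippet needs network access to download the tokenizer files.
from transformers import RobertaTokenizerFast

roberta_tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
demo = roberta_tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(demo.offset_mapping)  # expected (0, 5) and (6, 11): the space before the second token is trimmed away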
| 243 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index

    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
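# --- Illustration (not part of the original script) ---
# A self-contained sketch of how the 'init' pattern above rewrites a version
# string. The sample source line and target version are made up for the demo.
sample = '__version__ = "4.26.0.dev0"\n'
demo_pattern, demo_replace = REPLACE_PATTERNS["init"]
demo_replace = demo_replace.replace("VERSION", "4.26.0")
print(demo_pattern.sub(demo_replace, sample))  # -> __version__ = "4.26.0"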
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !" ("I love camembert!")
        output = model(input_ids)["last_hidden_state"]

        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
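# --- Illustration (not part of the original test) ---
# A sketch of how hard-coded ids like the ones above can be reproduced. The
# "camembert-base" checkpoint name is an assumption, ids can differ between
# checkpoints, and the snippet needs network access plus sentencepiece.
from transformers import CamembertTokenizer

camembert_tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
print(camembert_tokenizer.encode("J'aime le camembert !"))  # e.g. [5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]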
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
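# --- Illustration (not part of the original module) ---
# A quick sketch of the special-token layout produced by the helpers above.
# It downloads the checkpoint named in the maps, so network access is assumed.
demo_tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
demo_enc = demo_tok("How are you?", "I am fine.")
# Pairs come out as [CLS] A [SEP] B [SEP]; token_type_ids are 0s for A and 1s for B.
print(demo_tok.convert_ids_to_tokens(demo_enc["input_ids"]))
print(demo_enc["token_type_ids"])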
| 50 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # the header key must be "User-Agent" for the spoofed agent to take effect
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
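# --- Illustration (not part of the original script) ---
# A self-contained sketch of the CSS-selector lookup used above, run on a tiny
# made-up HTML fragment so no network access is needed. The class name
# "eZt8xd" is simply whatever Google happened to use for result links.
demo_html = '<a class="eZt8xd" href="/maps">Maps</a><a class="eZt8xd" href="/news">News</a>'
demo_soup = BeautifulSoup(demo_html, "html.parser")
for demo_link in demo_soup.select(".eZt8xd"):
    print(demo_link.text, demo_link.get("href"))  # -> Maps /maps, News /news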
| 561 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of chinese subwords starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
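# --- Illustration (not part of the original script) ---
# A worked example of add_sub_symbol() defined above: given BERT tokens and the
# set of multi-character LTP words, every non-initial character of a whole word
# is prefixed with "##" so the whole-word-masking data collator can find it.
# The tokens below form a made-up two-word sentence ("我们" + "学习").
demo_tokens = ["我", "们", "学", "习"]
demo_words = {"我们", "学习"}
print(add_sub_symbol(demo_tokens, demo_words))  # -> ['我', '##们', '学', '##习']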
| 50 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py,
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
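# --- Illustration (not part of the original script) ---
# A tiny self-contained example of the label alignment performed inside
# align_predictions(): positions labelled -100 (the CrossEntropyLoss
# ignore_index used for sub-word pieces and padding) are dropped before the
# seqeval metrics are computed. The labels and logits below are made up.
toy_label_map = {0: "O", 1: "B-PER"}
toy_logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]])  # (batch, seq, num_labels)
toy_label_ids = np.array([[0, 1, -100]])  # last position is ignored

toy_preds = np.argmax(toy_logits, axis=2)
aligned = [
    [toy_label_map[toy_preds[i][j]] for j in range(toy_label_ids.shape[1]) if toy_label_ids[i][j] != -100]
    for i in range(toy_label_ids.shape[0])
]
print(aligned)  # -> [['O', 'B-PER']]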
| 173 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
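# --- Illustration (not part of the original script) ---
# A self-contained sketch of the fused-QKV split performed in read_in_q_k_v():
# timm stores query/key/value as one (3*hidden, hidden) matrix, which is cut
# into three (hidden, hidden) blocks in q, k, v order. The sizes are made up.
demo_hidden = 4
demo_in_proj = torch.arange(3 * demo_hidden * demo_hidden, dtype=torch.float32).reshape(3 * demo_hidden, demo_hidden)
demo_q = demo_in_proj[:demo_hidden, :]
demo_k = demo_in_proj[demo_hidden : demo_hidden * 2, :]
demo_v = demo_in_proj[-demo_hidden:, :]
assert torch.equal(torch.cat([demo_q, demo_k, demo_v], dim=0), demo_in_proj)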
| 50 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an Ernie-M model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
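# --- Illustration (not part of the original module) ---
# A minimal sketch of using the configuration above, including the
# attribute_map aliasing: "num_classes" is transparently mapped to
# "num_labels" by PretrainedConfig. Outside this package, the class would be
# imported as `from transformers import ErnieMConfig`.
demo_config = ErnieMConfig(num_classes=3)
print(demo_config.hidden_size)  # 768 (default)
print(demo_config.num_labels)   # 3, set through the "num_classes" alias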
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
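# --- Illustration (not part of the original module) ---
# A toy, self-contained sketch of the lazy-import idea behind _LazyModule:
# the heavy import happens only on first attribute access, then the result is
# cached. The class and mapping below are invented for the demo.
import importlib


class _TinyLazyModule:
    def __init__(self, import_map):
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, name):
        if name in self._import_map:
            module = importlib.import_module(self._import_map[name])
            value = getattr(module, name)
            setattr(self, name, value)  # cache so __getattr__ is not hit again
            return value
        raise AttributeError(name)


lazy = _TinyLazyModule({"sqrt": "math"})
print(lazy.sqrt(9.0))  # math is imported only here -> 3.0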
| 50 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = 'bart'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''' )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 592 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'codegen'
_UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_ctx
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase_ ( self ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self ):
return self._config.n_head
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
return 13
| 50 | 0 |
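# Hedged sketch of the dummy past_key_values built for ONNX export above:
# one (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_seq_len, head_dim). Pure torch; the numbers below
# are illustrative, not taken from any checkpoint.
import torch

batch, num_heads, past_len, hidden_size, n_layer = 2, 4, 7, 32, 3
shape = (batch, num_heads, past_len, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
assert len(past_key_values) == n_layer
assert past_key_values[0][0].shape == (2, 4, 7, 8)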
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__A , __A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Any = StableDiffusionControlNetImgaImgPipeline
_a : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_a : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_a : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
a_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
a_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ = CLIPTextModel(_lowerCAmelCase )
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _a ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
"""simple docstring"""
if str(_lowerCAmelCase ).startswith('mps' ):
a_ = torch.manual_seed(_lowerCAmelCase )
else:
a_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
a_ = 2
a_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_lowerCAmelCase , device=torch.device(_lowerCAmelCase ) , )
a_ = floats_tensor(control_image.shape , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
a_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a_ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('RGB' ).resize((64, 64) )
a_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _a ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _a ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline
_a : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_a : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a : Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
a_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
a_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
a_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ = CLIPTextModel(_lowerCAmelCase )
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ = MultiControlNetModel([controlneta, controlneta] )
a_ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _a ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
"""simple docstring"""
if str(_lowerCAmelCase ).startswith('mps' ):
a_ = torch.manual_seed(_lowerCAmelCase )
else:
a_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
a_ = 2
a_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_lowerCAmelCase , device=torch.device(_lowerCAmelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_lowerCAmelCase , device=torch.device(_lowerCAmelCase ) , ),
]
a_ = floats_tensor(control_image[0].shape , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
a_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a_ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('RGB' ).resize((64, 64) )
a_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _a ( self ):
"""simple docstring"""
a_ = self.get_dummy_components()
a_ = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
a_ = 10.0
a_ = 4
a_ = self.get_dummy_inputs(_lowerCAmelCase )
a_ = steps
a_ = scale
a_ = pipe(**_lowerCAmelCase )[0]
a_ = self.get_dummy_inputs(_lowerCAmelCase )
a_ = steps
a_ = scale
a_ = pipe(**_lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
a_ = self.get_dummy_inputs(_lowerCAmelCase )
a_ = steps
a_ = scale
a_ = pipe(**_lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
a_ = self.get_dummy_inputs(_lowerCAmelCase )
a_ = steps
a_ = scale
a_ = pipe(**_lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _a ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _a ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _a ( self ):
"""simple docstring"""
a_ = self.get_dummy_components()
a_ = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_lowerCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
"""simple docstring"""
a_ = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
a_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=_lowerCAmelCase , controlnet=_lowerCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a_ = torch.Generator(device='cpu' ).manual_seed(0 )
a_ = 'evil space-punk bird'
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
a_ = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
a_ = pipe(
_lowerCAmelCase , _lowerCAmelCase , control_image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type='np' , num_inference_steps=50 , strength=0.6 , )
a_ = output.images[0]
assert image.shape == (512, 512, 3)
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 536 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : int = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
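# Hedged sketch of the lazy-import idea behind the _LazyModule wiring above,
# expressed with PEP 562 module-level __getattr__ (standard library only;
# this shows the general pattern, not transformers' internal implementation).
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute name -> module name

def __getattr__(name: str):
    # Resolve the attribute only on first access, deferring the real import.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")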
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase__ : Tuple = 'true'
def lowerCamelCase__ ( _A , _A=82 , _A=16 ):
'''simple docstring'''
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(__lowerCAmelCase )
snake_case_ = RegressionDataset(length=__lowerCAmelCase )
snake_case_ = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase )
model.to(accelerator.device )
snake_case_ , snake_case_ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
return model, ddp_model, dataloader
def lowerCamelCase__ ( _A , _A=False ):
'''simple docstring'''
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
snake_case_ = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(_A ):
snake_case_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
with accelerator.main_process_first():
snake_case_ = dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
snake_case_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_A ):
if use_longest:
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=16 )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = Accelerator(dispatch_batches=__lowerCAmelCase , split_batches=__lowerCAmelCase )
snake_case_ = get_dataloader(__lowerCAmelCase , not dispatch_batches )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=__lowerCAmelCase )
snake_case_ , snake_case_ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = []
for batch in dataloader:
snake_case_ , snake_case_ = batch.values()
with torch.no_grad():
snake_case_ = model(__lowerCAmelCase )
snake_case_ , snake_case_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case_ , snake_case_ = [], []
for logit, targ in logits_and_targets:
logits.append(__lowerCAmelCase )
targs.append(__lowerCAmelCase )
snake_case_ , snake_case_ = torch.cat(__lowerCAmelCase ), torch.cat(__lowerCAmelCase )
return logits, targs
def lowerCamelCase__ ( _A , _A=82 , _A=False , _A=False , _A=16 ):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ = get_basic_setup(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case_ , snake_case_ = generate_predictions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
assert (
len(__lowerCAmelCase ) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowerCAmelCase )}"
def lowerCamelCase__ ( _A = False , _A = False ):
'''simple docstring'''
snake_case_ = evaluate.load("glue" , "mrpc" )
snake_case_ , snake_case_ = get_mrpc_setup(__lowerCAmelCase , __lowerCAmelCase )
# First do baseline
snake_case_ , snake_case_ , snake_case_ = setup["no"]
model.to(__lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(__lowerCAmelCase )
with torch.inference_mode():
snake_case_ = model(**__lowerCAmelCase )
snake_case_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowerCAmelCase , references=batch["labels"] )
snake_case_ = metric.compute()
# Then do distributed
snake_case_ , snake_case_ , snake_case_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case_ = model(**__lowerCAmelCase )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ = batch["labels"]
snake_case_ , snake_case_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
snake_case_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(__lowerCAmelCase , __lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case_ = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase )
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(__lowerCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
snake_case_ = Accelerator()
test_torch_metrics(__lowerCAmelCase , 512 )
accelerator.state._reset_state()
def lowerCamelCase__ ( _A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 376 |
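# Hedged illustration of what gather_for_metrics guards against: when the
# final batch is padded so every process sees the same batch size, naively
# concatenating per-process outputs duplicates samples. A pure-Python mock
# of the trimming step (no accelerate APIs involved; numbers illustrative).
def trim_gathered(per_process: list[list[int]], num_samples: int) -> list[int]:
    flat = [x for shard in per_process for x in shard]
    return flat[:num_samples]  # drop padded duplicates at the tail

# 5 real samples over 2 processes with batch size 3: the second shard is
# padded with a repeated sample, which trimming removes.
assert trim_gathered([[0, 1, 2], [3, 4, 0]], num_samples=5) == [0, 1, 2, 3, 4]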
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 50 | 0 |
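# Hedged round-trip sketch of the fp16 conversion above: save a small fp32
# state dict, halve every tensor, and confirm the dtype changed and the file
# shrank. Names are illustrative; only standard torch APIs are used.
import os
import tempfile

import torch

state = {"weight": torch.randn(256, 256), "bias": torch.randn(256)}
with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "model_fp32.bin")
    torch.save(state, src)
    loaded = torch.load(src, map_location="cpu")
    halved = {k: v.half() for k, v in loaded.items()}
    dst = os.path.join(tmp, "model_fp16.bin")
    torch.save(halved, dst)
    assert all(v.dtype == torch.float16 for v in halved.values())
    assert os.path.getsize(dst) < os.path.getsize(src)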
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = int(__lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = divmod(__lowerCAmelCase , 2 )
return binary_recursive(__lowerCAmelCase ) + str(__lowerCAmelCase )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = str(__lowerCAmelCase ).strip()
if not number:
raise ValueError('''No input value was provided''' )
SCREAMING_SNAKE_CASE__ = '''-''' if number.startswith('''-''' ) else ''''''
SCREAMING_SNAKE_CASE__ = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__lowerCAmelCase ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 493 |
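# A minimal companion sketch: the same decimal-to-binary conversion written
# iteratively with divmod, to contrast with the recursive version above.
# The name to_binary is illustrative, not taken from the source.
def to_binary(number: int) -> str:
    if number == 0:
        return "0b0"
    sign = "-" if number < 0 else ""
    n = abs(number)
    bits = []
    while n > 0:
        n, remainder = divmod(n, 2)
        bits.append(str(remainder))
    return f"{sign}0b{''.join(reversed(bits))}"

assert to_binary(10) == "0b1010"
assert to_binary(-5) == "-0b101"
assert to_binary(0) == "0b0"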
'''simple docstring'''
import os
from pathlib import Path
def A__ ( ):
from torch.utils.cpp_extension import load
lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
lowerCamelCase__ = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 50 | 0 |
from statistics import mean
import numpy as np
def __magic_name__ ( __a : list , __a : list , __a : list , __a : int ):
'''simple docstring'''
UpperCamelCase__ = 0
# Number of processes finished
UpperCamelCase__ = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
UpperCamelCase__ = [0] * no_of_process
# List to include calculation results
UpperCamelCase__ = [0] * no_of_process
# Sort by arrival time.
UpperCamelCase__ = [burst_time[i] for i in np.argsort(__lowerCAmelCase )]
UpperCamelCase__ = [process_name[i] for i in np.argsort(__lowerCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
UpperCamelCase__ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
UpperCamelCase__ = arrival_time[i]
UpperCamelCase__ = 0
# Index showing the location of the process being performed
UpperCamelCase__ = 0
# Saves the current response ratio.
UpperCamelCase__ = 0
for i in range(0 , __lowerCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
UpperCamelCase__ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
UpperCamelCase__ = temp
UpperCamelCase__ = i
# Calculate the turn around time
UpperCamelCase__ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
UpperCamelCase__ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __magic_name__ ( __a : list , __a : list , __a : list , __a : int ):
'''simple docstring'''
UpperCamelCase__ = [0] * no_of_process
for i in range(0 , __lowerCAmelCase ):
UpperCamelCase__ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCamelCase_ = 5
lowerCamelCase_ = ['A', 'B', 'C', 'D', 'E']
lowerCamelCase_ = [1, 2, 3, 4, 5]
lowerCamelCase_ = [1, 2, 3, 4, 5]
lowerCamelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCamelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
| 513 |
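# Hedged illustration of the response-ratio formula the scheduler above
# maximizes: ratio = (waiting_time + burst_time) / burst_time. A tiny
# standalone check, independent of the implementation (names illustrative).
def response_ratio(current_time: int, arrival: int, burst: int) -> float:
    waiting = current_time - arrival
    return (waiting + burst) / burst

# At t=4, a job that arrived at t=1 with burst 3 has ratio (3 + 3) / 3 = 2.0.
assert response_ratio(4, 1, 3) == 2.0
# Longer waits raise the ratio, so short jobs cannot be starved forever.
assert response_ratio(10, 1, 3) > response_ratio(4, 1, 3)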
'''simple docstring'''
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ):
lowerCamelCase__ = len(__lowerCAmelCase )
print("""The following activities are selected:""" )
# The first activity is always selected
lowerCamelCase__ = 0
print(__lowerCAmelCase , end=""",""" )
# Consider rest of the activities
for j in range(__lowerCAmelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__lowerCAmelCase , end=""",""" )
lowerCamelCase__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50 | 0 |
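# A sketch of the same greedy activity-selection idea, returning indices
# instead of printing them (the name max_activities is illustrative).
# Assumes activities are already sorted by finish time, as above.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always compatible
    last_finish = finish[0]
    for j in range(1, len(finish)):
        if start[j] >= last_finish:
            selected.append(j)
            last_finish = finish[j]
    return selected

# Same inputs as the driver above select activities 0, 1, 3 and 4.
assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]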
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowerCamelCase : List[Any] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def _lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _lowerCAmelCase ( _UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =_TestCommandArgs(dataset=__lowerCAmelCase , all_configs=__lowerCAmelCase , save_infos=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =TestCommand(*__lowerCAmelCase )
test_command.run()
_SCREAMING_SNAKE_CASE =os.path.join(__lowerCAmelCase , 'README.md' )
assert os.path.exists(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =DatasetInfosDict.from_directory(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE =DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =getattr(dataset_infos['default'] , __lowerCAmelCase ), getattr(expected_dataset_infos['default'] , __lowerCAmelCase )
if key == "num_bytes":
assert is_apercent_close(__lowerCAmelCase , __lowerCAmelCase )
elif key == "splits":
assert list(__lowerCAmelCase ) == list(__lowerCAmelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 405 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,_lowerCAmelCase ,)
super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
| 50 | 0 |
def lowerCAmelCase__( lowercase : list[int] ) -> Optional[Any]:
__snake_case : Any = []
if len(__lowerCAmelCase ) == 1:
return [nums.copy()]
for _ in range(len(__lowerCAmelCase ) ):
__snake_case : List[str] = nums.pop(0 )
__snake_case : Optional[int] = permute(__lowerCAmelCase )
for perm in permutations:
perm.append(__lowerCAmelCase )
result.extend(__lowerCAmelCase )
nums.append(__lowerCAmelCase )
return result
def lowerCAmelCase__( lowercase : List[Any] ) -> Any:
def backtrack(lowercase : Optional[int] ):
if start == len(__lowerCAmelCase ) - 1:
output.append(nums[:] )
else:
for i in range(__lowerCAmelCase , len(__lowerCAmelCase ) ):
__snake_case , __snake_case : List[str] = nums[i], nums[start]
backtrack(start + 1 )
__snake_case , __snake_case : List[Any] = nums[i], nums[start] # backtrack
__snake_case : List[Any] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
_UpperCamelCase = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 243 |
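# Cross-check sketch: both strategies above should agree with the standard
# library's itertools.permutations up to ordering. Purely illustrative.
from itertools import permutations

def all_orderings(nums: list[int]) -> set[tuple[int, ...]]:
    return set(permutations(nums))

assert all_orderings([1, 2, 3]) == {
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1),
}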
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = []
for line in lines:
lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments
if line:
filtered_lines.append(__lowerCAmelCase )
lowerCamelCase__ = """\n""".join(__lowerCAmelCase )
# Make a hash from all this code
lowerCamelCase__ = full_str.encode("""utf-8""" )
return shaaaa(__lowerCAmelCase ).hexdigest()
# get importable module names and hash for caching
UpperCamelCase : Dict = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCamelCase : str = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
UpperCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
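# Minimal sketch of the caching hash above: strip comments and blank lines,
# then sha256 the remainder, so purely cosmetic edits to a loader module do
# not invalidate the cache. (hash_lines is an illustrative name.)
import re
from hashlib import sha256

def hash_lines(lines: list[str]) -> str:
    kept = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    body = "\n".join(line for line in kept if line)      # drop empty lines
    return sha256(body.encode("utf-8")).hexdigest()

assert hash_lines(["x = 1  # comment", ""]) == hash_lines(["x = 1  "])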
'''simple docstring'''
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCamelCase__ ( a , a , a , a ):
__snake_case = multiprocessing.Manager()
__snake_case = manager.list()
__snake_case = multiprocessing.Process(target=__lowerCAmelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def lowerCamelCase__ ( a , a , a ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
__snake_case = shutil.rmtree
__snake_case = os.rmdir
__snake_case = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
__snake_case = {}
with swallow_io():
with time_limit(__lowerCAmelCase ):
exec(__lowerCAmelCase , __lowerCAmelCase )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(f'failed: {e}' )
# Needed for cleaning up.
__snake_case = rmtree
__snake_case = rmdir
__snake_case = chdir
@contextlib.contextmanager
def lowerCamelCase__ ( a ):
def signal_handler(a , a ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , __lowerCAmelCase )
signal.signal(signal.SIGALRM , __lowerCAmelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def lowerCamelCase__ ( ):
__snake_case = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowerCAmelCase ):
with contextlib.redirect_stderr(__lowerCAmelCase ):
with redirect_stdin(__lowerCAmelCase ):
yield
@contextlib.contextmanager
def lowerCamelCase__ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowerCAmelCase ):
yield dirname
class a_ ( UpperCAmelCase__ ):
pass
class a_ ( io.StringIO ):
def lowercase__ ( self : Dict , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ):
raise OSError
def lowercase__ ( self : Union[str, Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : Optional[int] ):
raise OSError
def lowercase__ ( self : Optional[int] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
raise OSError
def lowercase__ ( self : Tuple , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
return False
class a_ ( contextlib._RedirectStream ): # type: ignore
lowercase_ : List[Any] = '''stdin'''
@contextlib.contextmanager
def lowerCamelCase__ ( a ):
if root == ".":
yield
return
__snake_case = os.getcwd()
os.chdir(__lowerCAmelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowerCAmelCase )
def lowerCamelCase__ ( a=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
__snake_case = None
__snake_case = None
import os
__snake_case = '1'
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
import shutil
__snake_case = None
__snake_case = None
__snake_case = None
import subprocess
__snake_case = None # type: ignore
__snake_case = None
import sys
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
| 356 |
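# Hedged sketch of the signal-based time limit used above, in isolation.
# POSIX-only (relies on signal.SIGALRM); names are illustrative.
import contextlib
import signal
import time

class Timeout(Exception):
    pass

@contextlib.contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise Timeout("timed out")
    # Install the handler before arming the timer to avoid a race.
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)  # always disarm

try:
    with time_limit(0.05):
        time.sleep(1)
except Timeout:
    pass  # expected: the sleep is interrupted by SIGALRM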
'''simple docstring'''
import operator
def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(__lowerCAmelCase ):
if _operator(__lowerCAmelCase , sublist[-1] ):
sublist.append(__lowerCAmelCase )
arr.pop(__lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(__lowerCAmelCase )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(__lowerCAmelCase ):
if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
solution.insert(__lowerCAmelCase , __lowerCAmelCase )
break
else:
solution.append(__lowerCAmelCase )
strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
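# A compact, self-contained restatement of the strand-sort idea for
# reference: repeatedly peel an increasing "strand" off the input and merge
# it into the result (heapq.merge handles the merge step). This is a sketch,
# not the implementation above.
from heapq import merge

def strand_sort_sketch(arr: list[int]) -> list[int]:
    result: list[int] = []
    while arr:
        strand = [arr.pop(0)]
        i = 0
        while i < len(arr):
            if arr[i] >= strand[-1]:
                strand.append(arr.pop(i))
            else:
                i += 1
        result = list(merge(result, strand))
    return result

assert strand_sort_sketch([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]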
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : int = ProphetNetTokenizer
A_ : Optional[Any] = False
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
super().setUp()
__magic_name__ : Optional[int] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] ) -> Tuple:
__magic_name__ : Tuple = 'UNwant\u00E9d,running'
__magic_name__ : Tuple = 'unwanted, running'
return input_text, output_text
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.tokenizer_class(self.vocab_file )
__magic_name__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
__magic_name__ : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ : str = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : List[Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
__magic_name__ : Tuple = BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
__magic_name__ : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__magic_name__ : int = {}
for i, token in enumerate(_lowerCAmelCase ):
__magic_name__ : int = i
__magic_name__ : Any = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
__magic_name__ : Any = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__magic_name__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : List[str] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__magic_name__ : str = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
__magic_name__ : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowerCAmelCase ( self : Any ) -> List[str]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ : Any = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__magic_name__ : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=_lowerCAmelCase )
__magic_name__ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowerCAmelCase )
__magic_name__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__magic_name__ : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 561 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A__ ( __lowerCAmelCase : dict ):
return (data["data"], data["target"])
def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
# Predict target for test data
lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
return predictions
def A__ ( ):
lowerCamelCase__ = fetch_california_housing()
lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 50 | 0 |
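# Hedged sketch of the error metrics printed above: MAE and MSE computed by
# hand on toy predictions, matching sklearn's definitions. Values are
# illustrative only.
import numpy as np

y_true = np.array([3.0, 2.5, 4.0])
y_pred = np.array([2.5, 3.0, 4.5])
mae = np.mean(np.abs(y_true - y_pred))   # mean absolute error
mse = np.mean((y_true - y_pred) ** 2)    # mean squared error
assert round(float(mae), 6) == 0.5
assert round(float(mse), 6) == 0.25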
'''simple docstring'''
import functools
def A__ ( A : list[int] , A : list[int]):
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase) for day in days):
raise ValueError("The parameter days should be a list of integers")
if len(__lowerCAmelCase) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase) for cost in costs):
raise ValueError("The parameter costs should be a list of three integers")
if len(__lowerCAmelCase) == 0:
return 0
if min(__lowerCAmelCase) <= 0:
raise ValueError("All days elements should be greater than 0")
if max(__lowerCAmelCase) >= 3_66:
raise ValueError("All days elements should be less than 366")
UpperCamelCase : Optional[Any] = set(__lowerCAmelCase)
@functools.cache
def dynamic_programming(A : int) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 |
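# Worked usage sketch of the ticket-cost DP above, restated standalone with
# illustrative names. With travel days [1, 4, 6, 7, 8, 20] and pass costs
# [2, 7, 15], the minimum spend is 11: a 1-day pass on day 1, a 7-day pass
# covering days 4-10, and a 1-day pass on day 20 (the classic example).
import functools

def min_ticket_cost(days: list[int], costs: list[int]) -> int:
    day_set = set(days)

    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return best(day + 1)  # no travel today: advance for free
        return min(
            costs[0] + best(day + 1),   # 1-day pass
            costs[1] + best(day + 7),   # 7-day pass
            costs[2] + best(day + 30),  # 30-day pass
        )

    return best(1)

assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11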
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
    def test_min_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
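
    # Note on the jitted variant: jax.jit traces each chain once into a single
    # compiled function, so the allclose check above also guards against
    # Python-side control flow in the processors that would not survive tracing.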
| 50 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Check whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str) -> int:
    """Return 1 if every character in word is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the multi-character, all-CJK words from a segmented sentence."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix '##' to BERT sub-tokens that continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """For each line, return the positions of BERT sub-tokens that belong to a
    whole Chinese word, for use in whole word masking."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
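

# A minimal illustration of add_sub_symbol (toy tokens assumed here, not taken
# from any real corpus): with the LTP-segmented word "北京" available, the
# second sub-token is marked as a continuation, which is exactly the position
# prepare_ref records for whole word masking.
assert add_sub_symbol(["我", "北", "京"], {"北京"}) == ["我", "北", "##京"]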
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
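
# Usage sketch (illustrative): with _LazyModule, `import
# transformers.models.groupvit` stays cheap, and the torch/TF-backed classes
# listed in _import_structure are only materialized on first attribute access,
# e.g. transformers.models.groupvit.GroupViTModel.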
| 50 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : str ):
"""simple docstring"""
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''width_multiplier''' ) )
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Optional[Any] , a__ : str , a__ : Tuple=13 , a__ : Dict=64 , a__ : int=2 , a__ : str=3 , a__ : List[Any]="swish" , a__ : Union[str, Any]=3 , a__ : Optional[int]=32 , a__ : List[str]=0.1 , a__ : Optional[Any]=0.0_2 , a__ : Optional[Any]=True , a__ : Any=True , a__ : List[str]=10 , a__ : Tuple=None , a__ : Optional[int]=0.2_5 , a__ : List[str]=0.0 , a__ : Optional[Any]=0.0 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = make_divisible(512 * width_multiplier , divisor=8 )
__snake_case = hidden_act
__snake_case = conv_kernel_size
__snake_case = output_stride
__snake_case = classifier_dropout_prob
__snake_case = use_labels
__snake_case = is_training
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = scope
__snake_case = width_multiplier
__snake_case = ffn_dropout
__snake_case = attn_dropout
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def a (self : int ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def a (self : Optional[Any] , a__ : Optional[int] , a__ : List[Any] , a__ : List[Any] , a__ : Any ):
"""simple docstring"""
__snake_case = MobileViTVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a (self : Tuple , a__ : Any , a__ : Any , a__ : List[str] , a__ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MobileViTVaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : Any , a__ : str ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MobileViTVaForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a (self : str ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : str = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : str = False
A_ : Any = False
A_ : List[str] = False
A_ : List[str] = False
def a (self : str ):
"""simple docstring"""
__snake_case = MobileViTVaModelTester(self )
__snake_case = MobileViTVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def a (self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : str ):
"""simple docstring"""
pass
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def a (self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] ):
__snake_case = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__snake_case = outputs.hidden_states
__snake_case = 5
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case = 2
for i in range(len(_lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = MobileViTVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCamelCase__ ( ) -> Optional[int]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : List[Any] ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def a (self : Tuple ):
"""simple docstring"""
__snake_case = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_lowerCAmelCase )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def a (self : Tuple ):
"""simple docstring"""
__snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = model.to(_lowerCAmelCase )
__snake_case = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
__snake_case = outputs.logits
# verify the logits
__snake_case = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCAmelCase )
__snake_case = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def a (self : int ):
"""simple docstring"""
__snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = model.to(_lowerCAmelCase )
__snake_case = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
__snake_case = outputs.logits.detach().cpu()
__snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(50, 60)] )
__snake_case = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
__snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
__snake_case = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
| 592 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (XOR sign-bit trick)."""
    return num1 ^ num2 < 0
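

# Quick sanity checks (values chosen for illustration): the XOR of two ints is
# negative exactly when their sign bits differ.
assert different_signs(1, -1)
assert not different_signs(1, 1)
assert not different_signs(-7, -3)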
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 0 |
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the number above."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
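

# Worked illustration of the reduce step (digits chosen for this note):
# reduce folds "9989" left to right as "9","9" -> "81"; "81","8" -> "648";
# "648","9" -> "5832", i.e. the running digit product kept as a string.
assert reduce(lambda x, y: str(int(x) * int(y)), "9989") == "5832"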
if __name__ == "__main__":
print(f"""{solution() = }""")
| 536 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy over the MATH dataset after canonicalizing answers."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 376 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase : int = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the JSON configuration
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
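

# Example invocation (all paths are placeholders and the script file name is
# assumed for illustration):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_ckpt \
#       --big_bird_config_file ./big_bird_config.json \
#       --pytorch_dump_path ./pytorch_dump \
#       --is_trivia_qa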
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 493 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPT-BigCode models such as bigcode/gpt_bigcode-santacoder."""

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
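

# Minimal usage sketch (hypothetical values, not recommended settings):
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#   config.num_hidden_layers  # -> 2, resolved through the attribute_map above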
| 50 | 0 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """Breadth-first search over an adjacency-list graph, recording parents so
    shortest paths (in edge count) can be reconstructed."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
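

# Hand-traced expectations for the demo below (assuming the adjacency lists
# above): BFS from "G" records parents G -> C -> A -> B -> D, so
# g.shortest_path("D") returns "G->C->A->B->D", g.shortest_path("G") returns
# "G", and g.shortest_path("Foo") raises ValueError since "Foo" is unreachable.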
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 513 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of every pixel by `level`."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")

    return img.point(brightness)
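

# Quick sanity check (a synthetic one-pixel image, not part of the original
# demo): a pixel of 100 at level 50 maps to 150, and PIL clamps results to the
# valid 0-255 range for "L"/"RGB" bands.
_demo_img = Image.new("L", (1, 1), color=100)
assert change_brightness(_demo_img, 50).getpixel((0, 0)) == 150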
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
| 50 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_SCREAMING_SNAKE_CASE =['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_SCREAMING_SNAKE_CASE ={'unk_token': '<unk>'}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCAmelCase ) )
_SCREAMING_SNAKE_CASE ={
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def A ( self : List[Any] , **_a : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def A ( self : str , **_a : Optional[Any] ) -> Dict:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def A ( self : Optional[Any] , **_a : List[str] ) -> List[Any]:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : Dict ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCAmelCase )
def A ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def A ( self : Tuple ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_lowerCAmelCase , return_tensors='np' )
_SCREAMING_SNAKE_CASE =processor(images=_lowerCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =processor(text=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def A ( self : Dict ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 405 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing row for the index where the negatives begin."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
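

# Hand-checked examples (rows mirror the test grids above): the index returned
# is where the negatives begin, or len(array) when there are none.
assert find_negative_index([4, 3, 2, -1]) == 3
assert find_negative_index([-1, -2, -3]) == 0
assert find_negative_index([3, 2, 1]) == 3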
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row over a shrinking bound."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
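

# Complexity note: each row is binary-searched once over a shrinking bound, so
# the count above runs in O(m log n), versus O(m * n) for the brute-force
# variants benchmarked below.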
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 50 | 0 |
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube. The float cube root is rounded to
    the nearest integer to avoid precision errors."""
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
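

# Why the rounding matters (numeric aside): floating-point cube roots drift,
# e.g. 27 ** (1 / 3) can evaluate to 3.0000000000000004, so an unrounded
# val * val * val == n comparison misreports genuine cubes.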
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 243 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main doc with links to the stable doc in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
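

# Typical flow sketch (version numbers are examples): running the script
# without flags bumps e.g. 4.30.0.dev0 -> 4.30.0 across init/setup/examples,
# then `--post_release` moves the main branch to 4.31.0.dev0.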
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 50 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = 'blip_text_model'

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class a_ ( UpperCAmelCase__ ):
lowercase_ : int = '''blip_vision_model'''
def __init__( self : Tuple , __lowerCAmelCase : Optional[Any]=7_6_8 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : List[str]=1_2 , __lowerCAmelCase : List[Any]=1_2 , __lowerCAmelCase : List[Any]=3_8_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Any=1E-10 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
__snake_case = hidden_size
__snake_case = intermediate_size
__snake_case = projection_dim
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = patch_size
__snake_case = image_size
__snake_case = initializer_range
__snake_case = attention_dropout
__snake_case = layer_norm_eps
__snake_case = hidden_act
@classmethod
def lowercase__ ( cls : Union[str, Any] , __lowerCAmelCase : Dict , **__lowerCAmelCase : Union[str, Any] ):
cls._set_token_in_kwargs(_lowerCAmelCase )
__snake_case , __snake_case = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class a_ ( UpperCAmelCase__ ):
lowercase_ : Optional[Any] = '''blip'''
lowercase_ : Tuple = True
def __init__( self : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any=5_1_2 , __lowerCAmelCase : Optional[Any]=2.6592 , __lowerCAmelCase : Dict=2_5_6 , **__lowerCAmelCase : Any , ):
super().__init__(**_lowerCAmelCase )
if text_config is None:
__snake_case = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__snake_case = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__snake_case = BlipTextConfig(**_lowerCAmelCase )
__snake_case = BlipVisionConfig(**_lowerCAmelCase )
__snake_case = self.vision_config.hidden_size
__snake_case = projection_dim
__snake_case = logit_scale_init_value
__snake_case = 1.0
__snake_case = 0.02
__snake_case = image_text_hidden_size
@classmethod
def lowercase__ ( cls : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def lowercase__ ( self : Dict ):
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.text_config.to_dict()
__snake_case = self.vision_config.to_dict()
__snake_case = self.__class__.model_type
return output
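# Minimal usage sketch for the composite config above, assuming the upstream
# `transformers` names (BlipTextConfig, BlipVisionConfig, BlipConfig) that the
# obfuscated classes in this file mirror:
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(hidden_size=768)
vision_cfg = BlipVisionConfig(hidden_size=768)
# Compose the two sub-configs exactly as the classmethod above does.
cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=512)
assert cfg.to_dict()["text_config"]["hidden_size"] == 768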
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
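# Minimal sketch of the special-token layout produced by the two methods above,
# assuming the upstream SqueezeBertTokenizerFast (needs network access to the Hub):
from transformers import SqueezeBertTokenizerFast

tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
pair = tok("hello", "world")
# Pairs are laid out as [CLS] hello [SEP] world [SEP]; token_type_ids are 0 for
# the first segment (including [CLS] and the first [SEP]) and 1 afterwards.
print(tok.convert_ids_to_tokens(pair["input_ids"]))
print(pair["token_type_ids"])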
| 50 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase :List[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ : List[str] = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
"""simple docstring"""
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ : Union[str, Any] = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : int = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Any ):
"""simple docstring"""
__magic_name__ : Optional[Any] = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any ):
"""simple docstring"""
__magic_name__ : List[Any] = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__magic_name__ : List[Any] = expected_configs[0]
assert expected_config in infos
__magic_name__ : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
__magic_name__ : Dict = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
__magic_name__ : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
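# Minimal sketch of the inspection helpers exercised above (they query Hub
# metadata, so network access is required):
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")  # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", config_name="plain_text")
assert "train" in splits and "validation" in splits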
| 561 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __lowerCAmelCase : Any ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def A__ ( __lowerCAmelCase : str ):
# word like '180' or '身高' or '神'
for char in word:
lowerCamelCase__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = set()
for token in tokens:
lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowerCamelCase__ = list(__lowerCAmelCase )
return word_list
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowerCamelCase__ = bert_tokens
lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
while start < end:
lowerCamelCase__ = True
if is_chinese(bert_word[start] ):
lowerCamelCase__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowerCamelCase__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase__ = """##""" + bert_word[j]
lowerCamelCase__ = start + i
lowerCamelCase__ = False
break
if single_word:
start += 1
return bert_word
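# Worked example for the re-marking above (a sketch; the function is bound to
# the obfuscated name A__ at this point in the module): with "你好" known as a
# whole word, its trailing character gains the "##" prefix so that BERT's
# whole-word masking treats both pieces as one unit:
#
#   add_sub_symbol(["你", "好", "世", "界"], {"你好"}) -> ["你", "##好", "世", "界"]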
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Optional[int] ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
| 50 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def SCREAMING_SNAKE_CASE__ ( self , **lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : List[str] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self , **lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Tuple = self.scheduler_classes[0]
UpperCamelCase : Optional[int] = self.get_scheduler_config(**_lowerCAmelCase )
UpperCamelCase : str = scheduler_class(**_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : int = 10, 0.0
UpperCamelCase : List[str] = self.dummy_model()
UpperCamelCase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for t in scheduler.timesteps:
UpperCamelCase : int = model(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Tuple = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCAmelCase )
UpperCamelCase : int = self.scheduler_classes[0]
UpperCamelCase : Any = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase : Optional[int] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCAmelCase , eta=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : List[str] = self.scheduler_classes[0]
UpperCamelCase : str = self.get_scheduler_config()
UpperCamelCase : List[str] = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase : Tuple = self.scheduler_classes[0]
UpperCamelCase : Tuple = self.get_scheduler_config()
UpperCamelCase : int = scheduler_class(**_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : str = 10, 0.0
scheduler.set_timesteps(_lowerCAmelCase )
UpperCamelCase : Dict = self.dummy_model()
UpperCamelCase : List[str] = self.dummy_sample_deter
UpperCamelCase : List[Any] = self.dummy_sample_deter + 0.1
UpperCamelCase : int = self.dummy_sample_deter - 0.1
UpperCamelCase : List[Any] = samplea.shape[0]
UpperCamelCase : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase : int = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
UpperCamelCase : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase : Any = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCAmelCase )
UpperCamelCase : Any = torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCamelCase : int = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Tuple = self.full_loop()
UpperCamelCase : Optional[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCamelCase : Optional[Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase : int = torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Tuple = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCamelCase : Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : int = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
UpperCamelCase : Any = torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 173 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
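# Shape sketch for the split above (an illustration, not part of the original
# script): timm fuses Q, K and V into one (3 * hidden_size, hidden_size) matrix,
# and the three slices peel them apart. For hidden_size = 768:
#
#   in_proj_weight: (2304, 768)
#   query = rows 0..767,  key = rows 768..1535,  value = rows 1536..2303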
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 0 |
def __lowerCamelCase ( ):
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase : Dict = generate_large_matrix()
UpperCAmelCase : Any = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __lowerCamelCase ( lowerCamelCase__ : list[list[int]] ):
'''simple docstring'''
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def __lowerCamelCase ( lowerCamelCase__ : list[int] ):
'''simple docstring'''
lowerCamelCase = 0
lowerCamelCase = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCamelCase = (left + right) // 2
lowerCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCamelCase = mid + 1
else:
lowerCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
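# Worked example for the binary search above (a sketch; the helper is called
# find_negative_index at its use site below): in the non-increasing row
# [3, 1, -2, -5] the first negative value sits at index 2,
#
#   find_negative_index([3, 1, -2, -5]) -> 2
#
# so the row contributes len(row) - 2 = 2 negatives to the total.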
def __lowerCamelCase ( lowerCamelCase__ : list[list[int]] ):
'''simple docstring'''
lowerCamelCase = 0
lowerCamelCase = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
lowerCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def __lowerCamelCase ( lowerCamelCase__ : list[list[int]] ):
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __lowerCamelCase ( lowerCamelCase__ : list[list[int]] ):
'''simple docstring'''
lowerCamelCase = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
def __lowerCamelCase ( ):
'''simple docstring'''
from timeit import timeit
print("""Running benchmarks""" )
lowerCamelCase = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCamelCase = timeit(f'{func}(grid=grid)' , setup=__lowerCAmelCase , number=500 )
print(f'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 457 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Tuple = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
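# Sketch of the lazy-import pattern above: static type checkers follow the
# direct imports in the TYPE_CHECKING branch, while at runtime the module is
# replaced by a _LazyModule that resolves a name such as MvpModel only on its
# first attribute access, keeping `import transformers` cheap.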
| 50 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Optional[Any] , a__ : str=None , **a__ : List[str] ):
"""simple docstring"""
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _lowerCAmelCase , )
super().__init__(args=_lowerCAmelCase , **_lowerCAmelCase )
| 592 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'codegen'
_UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_ctx
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase_ ( self ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self ):
return self._config.n_head
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
return 13
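# Minimal usage sketch, assuming the upstream names (CodeGenConfig,
# CodeGenOnnxConfig) that the obfuscated classes above mirror; needs torch and
# network access for the tokenizer download:
from transformers import AutoTokenizer, TensorType
from transformers.models.codegen.configuration_codegen import CodeGenConfig, CodeGenOnnxConfig

onnx_cfg = CodeGenOnnxConfig(CodeGenConfig(), task="default", use_past=True)
tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
dummy = onnx_cfg.generate_dummy_inputs(tok, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# With use_past=True the ordered inputs hold input_ids, one (key, value) tensor
# pair per layer, and an attention_mask extended to cover the past length.
assert "past_key_values" in dummy and "attention_mask" in dummy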
| 50 | 0 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : int ):
"""simple docstring"""
a_ = []
for part_id in partition_order:
a_ = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__lowerCAmelCase ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(100 ).repartition(1 )
a_ = Spark(__lowerCAmelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(10 ).repartition(2 )
a_ = [1, 0]
a_ = _generate_iterable_examples(__lowerCAmelCase , __lowerCAmelCase ) # Reverse the partitions.
a_ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , __lowerCAmelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(10 ).repartition(1 )
a_ = SparkExamplesIterable(__lowerCAmelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
        a_ = lambda lowercase_ : lowercase_.reverse()
a_ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [2, 1, 0] )
a_ = SparkExamplesIterable(__lowerCAmelCase ).shuffle_data_sources(__lowerCAmelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
a_ , a_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ = SparkExamplesIterable(__lowerCAmelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
a_ , a_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ = SparkExamplesIterable(__lowerCAmelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
a_ , a_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCamelCase ( ):
"""simple docstring"""
a_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a_ = spark.range(100 ).repartition(1 )
a_ = Spark(__lowerCAmelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
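# Sharding sketch for the worker split exercised above (an illustration): with
# 4 Spark partitions and 2 workers the partitions are dealt out round-robin,
#
#   worker 0 -> partitions [0, 2]
#   worker 1 -> partitions [1, 3]
#
# which is exactly what the two shard_data_sources blocks assert.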
| 536 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : int = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ : Dict = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ['MaskFormerFeatureExtractor']
lowercase__ : Tuple = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
lowercase__ : Tuple = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 376 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
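# Example invocation (a sketch; the script name is hypothetical -- `fire` maps
# the function signature onto the command line):
#
#   python to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path overwrites src_path in place, as handled above.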
| 50 | 0 |